diff --git "a/3176.jsonl" "b/3176.jsonl" new file mode 100644--- /dev/null +++ "b/3176.jsonl" @@ -0,0 +1,693 @@ +{"seq_id":"74317196852","text":"'''\n実験結果をエクセル形式で出力\n設問を指定→各設問ごとの性能をシードごとに出力\n指定なし → シードごとの性能を平均した結果を出力\n'''\nimport argparse\nimport logzero\nfrom logzero import logger\nimport logging\nfrom os import path\nfrom typing import List, Dict\nfrom glob import glob\nfrom collections import defaultdict\nimport json\nimport numpy as np\n\nfrom domain.prompt import prompts\n\nfrom print_result_func import *\n\nlogger.setLevel(logging.DEBUG)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-dir', type=path.abspath, help='input file dir')\n parser.add_argument(\"--suffix\", type=str,\n default=\"sufficiency_comprehensive.json\")\n parser.add_argument(\"--convert_name\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n return args\n\n\ndef print_eraser_result(dir_path: str, suffix: str, convert_name: bool):\n print(\"prompt\\ttrain size\\tattn size\\tseed\\tjusti\\tsufficiency\\tcomprehensiveness\")\n for prompt in get_prompt_list(dir_path):\n for item in get_item_list(dir_path, prompt):\n for train_size in get_train_size_list(dir_path, prompt, item):\n for attention_size in get_attention_size_list(dir_path, prompt, item, train_size):\n for seed in get_seed_list(dir_path, prompt, item, train_size, attention_size):\n prefix_file_path = f\"{dir_path}/{prompt}/{item}/{train_size}/{attention_size}/{seed}/{prompt}_{item}_{train_size}_{attention_size}_{seed}\"\n\n f_p_file_path = f\"{prefix_file_path}_{suffix}\"\n if path.isfile(f_p_file_path):\n try:\n f_p_data = json.load(open(f_p_file_path, 'r'))\n except:\n raise ValueError(f\"can't open {f_p_file_path}\")\n for justification_method in f_p_data:\n if convert_name:\n print(\n f\"{prompts[prompt].type}_{item}\", end='\\t')\n else:\n print(f\"{prompt}_{item}\", end='\\t')\n print(train_size, end='\\t')\n print(attention_size, end='\\t')\n print(seed, end='\\t')\n print(justification_method, end='\\t')\n\n print(f_p_data[justification_method]\n [\"test\"][\"sufficiency\"], end='\\t')\n print(f_p_data[justification_method]\n [\"test\"][\"comprehensiveness\"], end='\\t')\n\n print()\n\n\ndef main():\n args = parse_args()\n logger.info(args)\n print_eraser_result(args.dir, args.suffix, args.convert_name)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cl-tohoku/Explainability_of_SAS","sub_path":"analysis/print_sufficient_comprehensive_result_for_excel.py","file_name":"print_sufficient_comprehensive_result_for_excel.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42227265312","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n=====\r\nutils\r\n=====\r\n\r\nScript : utils.py\r\n\r\nAuthor : Dan_Patterson@carleton.ca\r\n\r\nModified : 2019-06-16\r\n\r\nPurpose: tools for working with numpy arrays\r\n\r\nReferences\r\n----------\r\n``_.\r\n\r\n``_.\r\n\r\n\r\nUseage:\r\n-------\r\n\r\n**doc_func(func=None)** : see get_func and get_modu\r\n\r\n**get_func** :\r\n\r\nRetrieve function information::\r\n\r\n get_func(func, line_nums=True, verbose=True)\r\n print(art.get_func(art.main))\r\n\r\n Function: .... main ....\r\n Line number... 
1334\r\n Docs:\r\n Do nothing\r\n Defaults: None\r\n Keyword Defaults: None\r\n Variable names:\r\n Source code:\r\n 0 def main():\r\n 1 '''Do nothing'''\r\n 2 pass\r\n\r\nget_modu :\r\n retrieve module info\r\n\r\n**info** :\r\n\r\nRetrieve array information::\r\n\r\n - array([(0, 1, 2, 3, 4), (5, 6, 7, 8, 9),\r\n (10, 11, 12, 13, 14), (15, 16, 17, 18, 19)],\r\n dtype=[('A', '\r\n dtype [('A', '\r\n |__name void320\r\n |__shape ()\r\n |__description\r\n |__name, itemsize\r\n |__['A', '>> from functools import wraps\r\n\r\n Example function::\r\n\r\n @time_deco # on the line above the function\r\n def some_func():\r\n ``do stuff``\r\n return None\r\n\r\n \"\"\"\r\n import time\r\n from functools import wraps\r\n\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n t_0 = time.perf_counter() # start time\r\n result = func(*args, **kwargs) # ... run the function ...\r\n t_1 = time.perf_counter() # end time\r\n dt = t_1 - t_0\r\n print(\"\\nTiming function for... {}\".format(func.__name__))\r\n if result is None:\r\n result = 0\r\n print(\" Time: {: <8.2e}s for {:,} objects\".format(dt, result))\r\n return result # return the result of the function\r\n #return dt # return delta time\r\n return wrapper\r\n\r\n\r\ndef run_deco(func):\r\n \"\"\"Prints basic function information and the results of a run.\r\n\r\n Parameters\r\n ----------\r\n The following import. Uncomment the import or move it inside the script.\r\n\r\n >>> from functools import wraps\r\n\r\n Example function::\r\n\r\n @run_deco # on the line above the function\r\n def some_func():\r\n ``do stuff``\r\n return None\r\n\r\n \"\"\"\r\n from functools import wraps\r\n\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n \"\"\"wrapper function\"\"\"\r\n frmt = \"\\n\".join([\"Function... {}\", \" args.... {}\",\r\n \" kwargs.. {}\", \" docs.... {}\"])\r\n ar = [func.__name__, args, kwargs, func.__doc__]\r\n print(dedent(frmt).format(*ar))\r\n result = func(*args, **kwargs)\r\n print(\"{!r:}\\n\".format(result)) # comment out if results not needed\r\n return result # for optional use outside.\r\n return wrapper\r\n\r\n\r\n# ----------------------------------------------------------------------------\r\n# ---- (1) doc_func ... code section ... ----\r\ndef doc_func(func=None, verbose=True):\r\n \"\"\"(doc_func)...Documenting code using inspect\r\n\r\n Parameters\r\n ----------\r\n func : function\r\n Function name to document, without quotes\r\n verbose : Boolean\r\n True prints the result, False returns a string of the result.\r\n\r\n Returns\r\n -------\r\n A listing of the source code with line numbers\r\n\r\n Notes\r\n -----\r\n Requires the `inspect` module\r\n\r\n Source code for::\r\n\r\n module level\r\n - inspect.getsourcelines(sys.modules[__name__])[0]\r\n\r\n function level\r\n - as a list => inspect.getsourcelines(num_41)[0]\r\n - as a string => inspect.getsource(num_41)\r\n\r\n file level\r\n - script = sys.argv[0]\r\n\r\n \"\"\"\r\n def demo_func():\r\n \"\"\"dummy...\r\n : Demonstrates retrieving and documenting module and function info.\r\n \"\"\"\r\n def sub():\r\n \"\"\"sub in dummy\"\"\"\r\n print(\"sub\")\r\n return None\r\n #\r\n import inspect\r\n if func is None:\r\n func = demo_func\r\n if not inspect.isfunction(func):\r\n out = \"\\nError... `{}` is not a function, but is of type... 
{}\\n\"\r\n print(out.format(func.__name__, type(func)))\r\n return None\r\n script2 = sys.argv[0] # a useful way to get a file's name\r\n lines, line_num = inspect.getsourcelines(func)\r\n code = \"\".join([\"{:4d} {}\".format(idx+line_num, line)\r\n for idx, line in enumerate(lines)])\r\n nmes = ['args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',\r\n 'kwonlydefaults', 'annotations']\r\n f = inspect.getfullargspec(func)\r\n f_args = \"\\n\".join([str(i) for i in list(zip(nmes, list(f)))])\r\n args = [line_num, code,\r\n inspect.getcomments(func),\r\n inspect.isfunction(func),\r\n inspect.ismethod(func),\r\n inspect.getmodulename(script2),\r\n f_args]\r\n frmt = \"\"\"\r\n :----------------------------------------------------------------------\r\n :---- doc_func(func) ----\r\n :Code for a function on line...{}...\r\n :\r\n {}\r\n Comments preceeding function\r\n {}\r\n function?... {} ... or method? {}\r\n Module name... {}\r\n Full specs....\r\n {}\r\n ----------------------------------------------------------------------\r\n \"\"\"\r\n out = (dedent(frmt)).format(*args)\r\n if verbose:\r\n print(out)\r\n else:\r\n return out\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ---- (2) get_func .... code section ----\r\ndef get_func(func, line_nums=True, verbose=True):\r\n \"\"\"Get function information (ie. for a def)\r\n\r\n Parameters\r\n ----------\r\n >>> from textwrap import dedent, indent, wrap\r\n >>> import inspect\r\n\r\n Returns\r\n -------\r\n The function information includes arguments and source code.\r\n A string is returned for printing.\r\n\r\n Notes\r\n -----\r\n Import the module containing the function and put the object name in\r\n without quotes...\r\n\r\n >>> from arraytools.utils import get_func\r\n >>> get_func(get_func) # returns this source code etc.\r\n \"\"\"\r\n frmt = \"\"\"\r\n :-----------------------------------------------------------------\r\n :Function: .... {} ....\r\n :Line number... {}\r\n :Docs:\r\n {}\r\n :Defaults: {}\r\n :Keyword Defaults: {}\r\n :Variable names:\r\n {}\\n\r\n :Source code:\r\n {}\r\n :\r\n :-----------------------------------------------------------------\r\n \"\"\"\r\n import inspect # required if not imported at the top\r\n # from textwrap import dedent, wrap\r\n\r\n if not inspect.isfunction(func):\r\n out = \"\\nError... `{}` is not a function, but is of type... {}\\n\"\r\n print(out.format(func.__name__, type(func)))\r\n return None\r\n\r\n lines, ln_num = inspect.getsourcelines(func)\r\n if line_nums:\r\n code = \"\".join([\"{:4d} {}\".format(idx + ln_num, line)\r\n for idx, line in enumerate(lines)])\r\n else:\r\n code = \"\".join([\"{}\".format(line) for line in lines])\r\n\r\n vars_ = \", \".join([i for i in func.__code__.co_varnames])\r\n vars_ = wrap(vars_, 50)\r\n vars_ = \"\\n\".join([i for i in vars_])\r\n args = [func.__name__, ln_num, dedent(func.__doc__), func.__defaults__,\r\n func.__kwdefaults__, indent(vars_, \" \"), code]\r\n code_mem = dedent(frmt).format(*args)\r\n if verbose:\r\n print(code_mem)\r\n else:\r\n return code_mem\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ---- (3) get_modu .... code section ----\r\ndef get_modu(obj, code=False, verbose=True):\r\n \"\"\"Get module (script) information, including source code for\r\n documentation purposes.\r\n\r\n Parameters\r\n ----------\r\n >>> from textwrap import dedent, indent\r\n >>> import inspect\r\n\r\n Returns\r\n -------\r\n A string is returned for printing. 
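    With ``verbose=False`` the text is returned instead of printed; a sketch,
    where ``tools`` stands for any module object you have already imported:

    >>> txt = get_modu(tools, code=False, verbose=False)
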
It will be the whole module\r\n so use with caution.\r\n\r\n Notes\r\n -----\r\n Useage::\r\n\r\n >>> from arraytools.utils import get_modu\r\n >>> get_modu(tools, code=False, verbose=True)\r\n >>> # No quotes around module name, code=True for module code\r\n\r\n \"\"\"\r\n frmt = \"\"\"\r\n :-----------------------------------------------------------------\r\n :Module: .... {} ....\r\n :------\r\n :File: ......\r\n {}\\n\r\n :Docs: ......\r\n {}\\n\r\n :Members: .....\r\n {}\r\n \"\"\"\r\n frmt0 = \"\"\"\r\n :{}\r\n :-----------------------------------------------------------------\r\n \"\"\"\r\n frmt1 = \"\"\"\r\n :Source code: .....\r\n {}\r\n :\r\n :-----------------------------------------------------------------\r\n \"\"\"\r\n import inspect\r\n # from textwrap import dedent # required if not immported initially\r\n\r\n if not inspect.ismodule(obj):\r\n out = \"\\nError... `{}` is not a module, but is of type... {}\\n\"\r\n print(out.format(obj.__name__, type(obj)))\r\n return None\r\n if code:\r\n lines, _ = inspect.getsourcelines(obj)\r\n frmt = frmt + frmt1\r\n code = \"\".join([\"{:4d} {}\".format(idx, line)\r\n for idx, line in enumerate(lines)])\r\n else:\r\n lines = code = \"\"\r\n frmt = frmt + frmt0\r\n memb = [i[0] for i in inspect.getmembers(obj)]\r\n args = [obj.__name__, obj.__file__, obj.__doc__, memb, code]\r\n mod_mem = dedent(frmt).format(*args)\r\n if verbose:\r\n print(mod_mem)\r\n else:\r\n return mod_mem\r\n\r\n# ----------------------------------------------------------------------\r\n# ---- (4) dirr .... code section ----\r\ndef dirr(obj, colwise=False, cols=4, sub=None, prn=True):\r\n \"\"\"A formatted `dir` listing of an object, module, function... anything you\r\n can get a listing for.\r\n\r\n Source, arraytools.py_tools has a pure python equivalent\r\n\r\n Also, arraytools `__init__._info()` has an abbreviated version\r\n\r\n Parameters\r\n ----------\r\n colwise : boolean\r\n `True` or `1`, otherwise, `False` or `0`\r\n cols : number\r\n pick a size to suit\r\n sub : text\r\n sub array with wildcards\r\n\r\n - `arr\\*` : begin with `arr`\r\n - `\\*arr` : endswith `arr` or\r\n - `\\*arr\\*`: contains `arr`\r\n prn : boolean\r\n `True` for print or `False` to return output as string\r\n\r\n Returns\r\n -------\r\n A directory listing of a module's namespace or a part of it if the\r\n `sub` option is specified.\r\n\r\n Notes\r\n -----\r\n See the `inspect` module for possible additions like `isfunction`,\r\n `ismethod`, `ismodule`\r\n\r\n Examples::\r\n\r\n dirr(art, colwise=True, cols=3, sub=None, prn=True) # all columnwise\r\n dirr(art, colwise=True, cols=3, sub='arr', prn=True) # just the `arr`s\r\n\r\n (001) _arr_common arr2xyz arr_json\r\n (002) arr_pnts arr_polygon_fc arr_polyline_fc\r\n (003) array2raster array_fc\r\n (004) array_struct arrays_cols\r\n \"\"\"\r\n err = \"\"\"\r\n ...No matches found using substring . `{0}`\r\n ...check with wildcards, *, ... 
`\\*abc\\*`, `\\*abc`, `abc\\*`\r\n\r\n \"\"\"\r\n d_arr = dir(obj)\r\n a = np.array(d_arr)\r\n dt = a.dtype.descr[0][1]\r\n if sub not in (None, '', ' '):\r\n start = [0, 1][sub[0] == \"*\"]\r\n end = [0, -1][sub[-1] == \"*\"]\r\n if not start and abs(end):\r\n a = [i for i in d_arr\r\n if i.startswith(sub[start:end], start, len(i))]\r\n elif start and abs(end):\r\n a = [i for i in d_arr\r\n if sub[1:4] in i[:len(i)]]\r\n elif abs(end):\r\n sub = sub.replace(\"*\", \"\")\r\n a = [i for i in d_arr\r\n if i.endswith(sub, start, len(i))]\r\n else:\r\n a = []\r\n if len(a) == 0:\r\n print(dedent(err).format(sub))\r\n return None\r\n num = max([len(i) for i in a])\r\n else:\r\n num = int(\"\".join([i for i in dt if i.isdigit()]))\r\n frmt = (\"{{!s:<{}}} \".format(num)) * cols\r\n if colwise:\r\n z = np.array_split(a, cols)\r\n zl = [len(i) for i in z]\r\n N = max(zl)\r\n e = np.empty((N, cols), dtype=z[0].dtype)\r\n for i in range(cols):\r\n n = min(N, zl[i])\r\n e[:n, i] = z[i]\r\n else:\r\n csze = len(a) / cols\r\n rows = int(csze) + (csze % 1 > 0)\r\n z = np.array_split(a, rows)\r\n e = np.empty((len(z), cols), dtype=z[0].dtype)\r\n N = len(z)\r\n for i in range(N):\r\n n = min(cols, len(z[i]))\r\n e[i, :n] = z[i][:n]\r\n if hasattr(obj, '__name__'):\r\n args = [\"-\"*70, obj.__name__, obj]\r\n else:\r\n args = [\"-\"*70, type(obj), \"np version\"]\r\n txt_out = \"\\n{}\\n| dir({}) ...\\n| {}\\n-------\".format(*args)\r\n cnt = 1\r\n for i in e:\r\n txt_out += \"\\n ({:>03.0f}) {}\".format(cnt, frmt.format(*i))\r\n cnt += cols\r\n if prn:\r\n print(txt_out)\r\n else:\r\n return txt_out\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ---- (5) wrapper .... code section ----\r\ndef _wrapper(a, wdth=70):\r\n \"\"\"Wrap stuff using textwrap.wrap\r\n\r\n Notes:\r\n -----\r\n TextWrapper class\r\n __init__(self, width=70, initial_indent='', subsequent_indent='',\r\n expand_tabs=True, replace_whitespace=True,\r\n fix_sentence_endings=False, break_long_words=True,\r\n drop_whitespace=True, break_on_hyphens=True, tabsize=8,\r\n *, max_lines=None, placeholder=' [...]')\r\n \"\"\"\r\n if isinstance(a, np.ndarray):\r\n txt = [str(i) for i in a.tolist()]\r\n txt = \", \".join(txt)\r\n elif isinstance(a, (list, tuple)):\r\n txt = \", \".join([str(i) for i in a])\r\n txt = \"\\n\".join(wrap(txt, width=wdth))\r\n return txt\r\n\r\n\r\ndef _utils_help_():\r\n \"\"\"arraytools.utils help...\r\n\r\n Function list follows:\r\n \"\"\"\r\n _hf = \"\"\"\r\n :-------------------------------------------------------------------:\r\n : ---- arrtools functions (loaded as 'art') ----\r\n : ---- from utils.py\r\n (1) doc_func(func=None)\r\n documenting code using inspect\r\n (2) get_func(obj, line_nums=True, verbose=True)\r\n pull in function code\r\n (3) get_modu(obj)\r\n pull in module code\r\n (4) dirr(a) object info\r\n (5) wrapper(a) format objects as a string\r\n :-------------------------------------------------------------------:\r\n \"\"\"\r\n print(dedent(_hf))\r\n\r\n# ----------------------------------------------------------------------\r\n# .... final code section producing the featureclass and extendtable\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# __main__ .... code section\r\nif __name__ == \"__main__\":\r\n # print the script source name.\r\n testing = True\r\n print('\\n{} in source script... 
{}'.format(__name__, script))\r\n # parameters here\r\nelse:\r\n testing = False\r\n # parameters here\r\n","repo_name":"Dan-Patterson/arraytools","sub_path":"arraytools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16690,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"24440533394","text":"from collections import defaultdict\n\ndef solution(gems):\n left, right = 0, 0\n gems_size, kind_size, d = len(gems), len(set(gems)), defaultdict(int)\n answer = [0, gems_size + 1]\n \n while right < gems_size:\n d[gems[right]] += 1\n right += 1\n \n if len(d) == kind_size:\n while left < right:\n if d[gems[left]] > 1:\n d[gems[left]] -= 1\n left += 1\n elif right - left-1 < answer[1] - answer[0]:\n answer = [left+1, right]\n break\n else:\n break\n \n return answer\n","repo_name":"bassyu/ps","sub_path":"programmers/3_보석_쇼핑.py","file_name":"3_보석_쇼핑.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40832936007","text":"import numpy as np\nimport scipy.io\n\ndef im2mat(img):\n r\"\"\" \n\n Vectorize image img \n\n Parameters\n ----------\n img : array-like, shape (n, n, nb_canaux)\n\n Returns\n -------\n r : array-like, shape (nb_canaux, N)\n \"\"\"\n nb_canaux = img.shape[2]\n N = (img.shape[0])**2\n r = np.zeros((nb_canaux, N))\n for c in range(nb_canaux):\n r[c, :] = img[:, :, c].reshape((1, N))\n return r\n\n\ndef mat2im(F):\n r\"\"\" \n\n DeVectorize image img \n\n Parameters\n ----------\n F : array-like, shape (nb_canaux, N)\n\n Returns\n -------\n img_f : array-like, shape (n, n, nb_canaux)\n \"\"\"\n n = round(np.sqrt(F.shape[1]))\n img_f = np.zeros((n, n, F.shape[0]))\n for c in range(F.shape[0]):\n img_f[:, :, c] = F[c, :].reshape(n, n)\n return img_f\n\n\ndef pos_xy(n):\n r\"\"\" \n\n Create an array with position x and y of each pixel on the 2D grid\n Parameters\n ----------\n n : int\n num of pixels in each direction of the image x,y\n\n Returns\n -------\n pos : array-like, shape (2, n*n)\n spectral bands with the position of each pixel on the x,y 2D-grid\n \"\"\"\n pos = np.zeros((2, n*n))\n for j in range(n*n):\n pos[:, j] = np.array([j//n, j % n])\n return pos\n\n\ndef add_posvect(V):\n r\"\"\" \n\n Add position vector from pos_xy(n) to the image V\n\n Parameters\n ----------\n n : int\n num of pixels in each direction of the image x,y\n\n Returns\n -------\n pos : array-like, shape (2, n*n)\n spectral bands with the position of each pixel on the x,y 2D-grid\n \"\"\"\n N = V.shape[1]\n n = round(np.sqrt(N))\n return np.vstack((V, pos_xy(n)))\n\n\ndef interpo(Iv, d):\n r\"\"\" \n\n Interpolate image Iv building image Iint\n\n Parameters\n ----------\n Iv : array-like, shape (lh,ph)\n image to interpolate\n d : int \n squared-root of interpolation ration d^2=pm/ph\n Returns\n -------\n Iint : array-like, shape (lh,pm)\n interpolated image\n \"\"\"\n Im = mat2im(Iv)\n U = scipy.ndimage.zoom(Im, [d, d, 1])\n Iint = im2mat(U)\n return Iint\n\ndef torgb(im):\n r\"\"\" \n\n Extract RGB image from HS image for example image\n\n Parameters\n ----------\n im : array-like, shape (l,p)\n HS image\n\n Returns\n -------\n Irgb : array-like, shape (l,p)\n RGB image\n \"\"\"\n r = mat2im(im[26,:].reshape(1,im.shape[1]))\n g = mat2im(im[19,:].reshape(1,im.shape[1]))\n b = mat2im(im[10,:].reshape(1,im.shape[1]))\n r = r / np.max(r)\n g = g / np.max(g)\n b = b / np.max(b)\n Irgb = np.dstack((r,g,b))\n return 
Irgb\n\ndef MSE_image(x,y):\n r\"\"\"\n Mean Squared error between image x and image y\n \"\"\"\n return np.sum((x-y)**2)","repo_name":"JeanMljc/Sliced-wasserstein","sub_path":"image_proscess.py","file_name":"image_proscess.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23639470313","text":"import openpyxl\nfrom openpyxl.styles import Font, Alignment\nfrom gkcore.models.meta import gk_api\nfrom pyramid.response import Response\n\n# from io import BytesIO\nimport io\n\n\ndef godown_list(self):\n \"\"\"Godown List Spreadsheet\n\n This function returns a spreadsheet form of List of Godowns Report.\n The spreadsheet in XLSX format is generated by the backend and sent in base64 encoded format.\n It is decoded and returned along with mime information.\n\n params\n ======\n fystart\n fyend\n orgname\n \"\"\"\n try:\n header = {\"gktoken\": self.request.headers[\"gktoken\"]}\n result = gk_api(\"/godown\", header, self.request)[\"gkresult\"]\n fystart = str(self.request.params[\"fystart\"])\n fyend = str(self.request.params[\"fyend\"])\n orgname = str(self.request.params[\"orgname\"])\n # A workbook is opened.\n godownwb = openpyxl.Workbook()\n # The new sheet is the active sheet as no other sheet exists. It is set as value of variable - sheet.\n sheet = godownwb.active\n # Title of the sheet and width of columns are set.\n sheet.title = \"List of Godowns\"\n\n sheet.column_dimensions[\"A\"].width = 8\n sheet.column_dimensions[\"B\"].width = 18\n sheet.column_dimensions[\"C\"].width = 36\n sheet.column_dimensions[\"D\"].width = 24\n sheet.column_dimensions[\"E\"].width = 16\n sheet.column_dimensions[\"F\"].width = 16\n\n # Cells of first two rows are merged to display organisation details properly.\n\n sheet.merge_cells(\"A1:F2\")\n # Font and Alignment of cells are set. Each cell can be identified using the cell index - column name and row number.\n sheet[\"A1\"].font = Font(name=\"Liberation Serif\", size=\"16\", bold=True)\n sheet[\"A1\"].alignment = Alignment(horizontal=\"center\", vertical=\"center\")\n # Organisation name and financial year are displayed.\n sheet[\"A1\"] = orgname + \" (FY: \" + fystart + \" to \" + fyend + \")\"\n sheet[\"A3\"].font = Font(name=\"Liberation Serif\", size=\"14\", bold=True)\n sheet[\"A3\"].alignment = Alignment(horizontal=\"center\", vertical=\"center\")\n sheet[\"A3\"] = \"List of Godowns\"\n sheet.merge_cells(\"A3:F3\")\n sheet[\"A4\"] = \"Sr. 
No.\"\n sheet[\"B4\"] = \"Godown Name\"\n sheet[\"C4\"] = \"Address\"\n sheet[\"D4\"] = \"Contact Person\"\n sheet[\"E4\"] = \"Contact Number\"\n sheet[\"F4\"] = \"Status\"\n titlerow = sheet.row_dimensions[4]\n titlerow.font = Font(name=\"Liberation Serif\", size=12, bold=True)\n row = 5\n srno = 1\n for godown in result:\n sheet[\"A\" + str(row)] = srno\n sheet[\"A\" + str(row)].alignment = Alignment(horizontal=\"left\")\n sheet[\"A\" + str(row)].font = Font(\n name=\"Liberation Serif\", size=\"12\", bold=False\n )\n sheet[\"B\" + str(row)] = godown[\"goname\"]\n sheet[\"B\" + str(row)].font = Font(\n name=\"Liberation Serif\", size=\"12\", bold=False\n )\n sheet[\"C\" + str(row)] = godown[\"goaddr\"] + \" , \" + godown[\"state\"]\n sheet[\"C\" + str(row)].font = Font(\n name=\"Liberation Serif\", size=\"12\", bold=False\n )\n sheet[\"D\" + str(row)] = godown[\"contactname\"]\n sheet[\"D\" + str(row)].font = Font(\n name=\"Liberation Serif\", size=\"12\", bold=False\n )\n sheet[\"E\" + str(row)] = godown[\"gocontact\"]\n sheet[\"E\" + str(row)].font = Font(\n name=\"Liberation Serif\", size=\"12\", bold=False\n )\n sheet[\"F\" + str(row)] = godown[\"godownstatus\"]\n sheet[\"F\" + str(row)].font = Font(\n name=\"Liberation Serif\", size=\"12\", bold=False\n )\n\n row = row + 1\n srno += 1\n output = io.BytesIO()\n godownwb.save(output)\n contents = output.getvalue()\n output.close()\n headerList = {\n \"Content-Type\": \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n \"Content-Length\": len(contents),\n \"Content-Disposition\": \"attachment; filename=report.xlsx\",\n \"X-Content-Type-Options\": \"nosniff\",\n \"Set-Cookie\": \"fileDownload=true; path=/ [;HttpOnly]\",\n }\n # headerList = {'Content-Type':'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' ,'Content-Length': len(contents),'Content-Disposition': 'attachment; filename=report.xlsx','Set-Cookie':'fileDownload=true; path=/'}\n return Response(contents, headerlist=list(headerList.items()))\n except:\n return {\"gkstatus\": 3}\n","repo_name":"gnukhata/gkcore","sub_path":"gkcore/views/spreadsheets/godown.py","file_name":"godown.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31389387039","text":"import smbus as smbus\nimport RPi.GPIO as GPIO\nimport time\n\nbus = smbus.SMBus(1)\ndef setup(Addr):\n global address\n address = Addr\nmaxLux=0\nif __name__ == \"__main__\":\n setup(0x48)\n while True:\n bus.write_byte(address,0x40)\n bus.read_byte(address)\n tmp=bus.read_byte(address)\n if maxLux 1, 'Input file must have an extension'\nfile_ext = file.split('.')[-1]\nassert file_ext == 'xml' or file_ext == 'yxmd', 'Input file must be .xml or .yxmd'\nif file_ext == 'yxmd':\n xml = file.split('.')[0] + '.xml'\n copyfile(file, xml)\n tree = ET.parse(xml)\nelse:\n tree = ET.parse(file)\n\n# Output file\noutput_file_name = sys.argv[2]\nassert len(output_file_name.split('.')) > 1, 'Output file must have an extension'\noutput_file_ext = output_file_name.split('.')[-1]\nassert output_file_ext == 'csv', 'Output file must be .csv'\n\nroot = tree.getroot()\n\nlst = []\nfor x in root.iter('Node'):\n node = NodeElement(x)\n lst.append(node.data)\n\nkeys = lst[0].keys()\nwith open(output_file_name, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n 
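    # csv.DictWriter takes its field order from the first node's keys above; a
    # node dict missing one of those keys is written as an empty cell, while an
    # unexpected extra key raises ValueError unless extrasaction='ignore' is passed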
dict_writer.writerows(lst)\n","repo_name":"shiv-io/Alteryx-Metadata-Parser","sub_path":"examine_alteryx_workflow.py","file_name":"examine_alteryx_workflow.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"36440515887","text":"# -*- coding:utf-8 -*-\nn, m = map(int, input().split())\nrelations = [[0 for i in range(n)] for j in range(n)]\nfor i in range(m):\n x, y = map(int, input().split())\n relations[x-1][y-1] = 1\n relations[y-1][x-1] = 1\n relations[x-1][x-1] = 1\n relations[y-1][y-1] = 1\nprint(relations)\n\nresult = 1\n\nfor i in range(2**n):\n people = []\n for j in reversed(range(1,n+1)):\n if i&(1< THRESHOLD\n\n def is_in_timerange(self, time_range):\n ts_now = round(datetime.timestamp(datetime.now()) * 1000)\n ts_lower_bound = ts_now - interval_to_ms[self.interval] * time_range\n ts_candlestick = datetime.timestamp(datetime.strptime(self.time_open, '%Y-%m-%d %H:%M')) * 1000\n\n return ts_candlestick > ts_lower_bound\n\n def get_support_area(self, candles):\n \"\"\"\n Decide whether discussed candlestick makes resistance level or not\n and return area between opening / closing of the candle and high / low of the candle.\n\n Following statement is applied for both support and resistance levels\n and these levels are made by `fractals` - 5 candles patterns.\n For support, the third candle has the lowest `low` price, the previous candles\n have decreasing lows and the next candles have increasing lows.\n Then the low of the third candle is the support level.\n The same concept can be applied to resistance levels, where the third has the highest `high`\n of the five ones.\n \"\"\"\n\n if self.idx - 2 < 0 or self.idx + 2 >= self.max_candles_no:\n return\n\n candidates = candles[self.idx - 2: self.idx + 3]\n if self.low < candidates[1].get_data()['price_low'] < candidates[0].get_data()['price_low'] and \\\n self.low < candidates[3].get_data()['price_low'] < candidates[4].get_data()['price_low']:\n\n body_lower = self.high\n for c in candidates:\n body_c = min([c.get_data()['price_close'], c.get_data()['price_open']])\n if body_c < body_lower:\n body_lower = body_c\n\n self.is_support = True\n self.support_lower = self.low\n self.support_upper = body_lower\n\n self.data.update({\n 'support_lower': self.support_lower,\n 'support_upper': self.support_upper\n })\n\n return {\n 'support_lower': self.support_lower,\n 'support_upper': self.support_upper\n }\n\n def get_resistance_area(self, candles):\n \"\"\"Decide whether discussed candlestick makes resistance level or not.\"\"\"\n\n if self.idx - 2 < 0 or self.idx + 2 >= self.max_candles_no:\n return\n\n candidates = candles[self.idx - 2: self.idx + 3]\n if self.high > candidates[1].get_data()['price_high'] > candidates[0].get_data()['price_high'] and \\\n self.high > candidates[3].get_data()['price_high'] > candidates[4].get_data()['price_high']:\n\n body_upper = self.low\n for c in candidates:\n body_c = max([c.get_data()['price_close'], c.get_data()['price_open']])\n if body_c > body_upper:\n body_upper = body_c\n\n self.is_resistance = True\n self.resistance_lower = body_upper\n self.resistance_upper = self.high\n\n self.data.update({\n 'resistance_lower': self.resistance_lower,\n 'resistance_upper': self.resistance_upper\n })\n\n return {\n 'resistance_lower': self.resistance_lower,\n 'resistance_upper': self.resistance_upper\n 
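            # mirrors get_support_area with the comparisons flipped: the zone
            # runs from the highest candle body (body_upper) up to the fractal high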
}\n\n","repo_name":"tomsonn/stonks","sub_path":"classes/candlestick.py","file_name":"candlestick.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37951101349","text":"import json\n\nimport pytest\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.contrib.sessions.middleware import SessionMiddleware\nfrom django.test import RequestFactory\nfrom exchange_backend import views\nfrom mixer.backend.django import mixer\nfrom rest_framework.request import Request\nfrom exchange_backend.views import WithdrawmoneyView, ProfileView, User\nfrom utils import get_restframework_request\n\npytestmark = pytest.mark.django_db\n\n\nclass TestRegisterPage:\n def test_registerview(self):\n req = RequestFactory().get('register/')\n resp = views.RegisterView.as_view()(req)\n assert resp.status_code == 200\n\n def test_registeruser(self):\n body = {\n \"username\": \"test\",\n \"password\": \"test@123\",\n \"password2\": \"test@123\",\n \"email\": \"test@example.com\",\n \"first_name\": \"test\",\n \"last_name\": \"user\"\n }\n body = json.dumps(body)\n req = RequestFactory(content_type=\"application/json\")\n req = req.post('register/', content_type=\"application/json\", data=body)\n resp = views.RegisterView.as_view()(req)\n assert resp.status_code == 302, \"As after registration it will redirect to Home page\"\n assert '' in resp.url\n assert User.objects.filter(username=\"test\").first() is not None, \"As user is Sucessfully created\"\n\n\nclass TestLoginPage:\n def test_loginview(self):\n req = RequestFactory().get('login/')\n resp = views.LoginView.as_view()(req)\n assert resp.status_code == 200\n\n def test_register_loginuser(self):\n body = {\n \"username\": \"test\",\n \"password\": \"test@123\",\n \"password2\": \"test@123\",\n \"email\": \"test@example.com\",\n \"first_name\": \"test\",\n \"last_name\": \"user\"\n }\n body = json.dumps(body)\n req = RequestFactory(content_type=\"application/json\")\n req = req.post('register/', content_type=\"application/json\", data=body)\n resp = views.RegisterView.as_view()(req)\n assert resp.status_code == 302, \"As after registration it will redirect to Home page\"\n assert '' in resp.url\n assert User.objects.filter(username=\"test\").first() is not None, \"As user is Sucessfully created\"\n body = {\n \"username\": \"test\",\n \"password\": \"test@123\",\n }\n body = json.dumps(body)\n req = get_restframework_request(\"login/\", body)\n resp = views.LoginView.as_view()(req)\n assert resp.status_code == 302, \"As after Success Login it will redirect to Home page\"\n assert resp.url == '/', \"As after Successful Login it will redirect to Home page\"\n body = {\n \"username\": \"wronguser\",\n \"password\": \"test@123\",\n }\n body = json.dumps(body)\n req = get_restframework_request(\"login/\", body)\n resp = views.LoginView.as_view()(req)\n assert resp.status_code == 401, \"unauthorized Status code\"\n body = {\n \"username\": \"test\",\n \"password\": \"test@123\",\n }\n body = json.dumps(body)\n req = get_restframework_request(\"login/\", body)\n resp = views.LoginView.as_view()(req)\n assert resp.status_code == 302, \"Redirect Status code\"\n assert resp.url == '/', \"As after Successful Login it will redirect to Home page\"\n\n\nclass TestLogoutView:\n def test_logoutuser(self):\n user = mixer.blend('exchange_backend.user')\n req = get_restframework_request(\"logout/\", {})\n req.user = user\n resp = views.LogoutView.as_view()(req)\n 
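        # a successful logout should answer with a redirect rather than a
        # rendered page, hence the 302 status and URL checks below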
assert resp.status_code == 302, \"As after Logout it will redirect to Home page\"\n assert '' in resp.url\n\n\nclass TestHomePage:\n def test_homeview(self):\n req = RequestFactory().get('/')\n req.user = AnonymousUser()\n resp = views.ServeFrontend.as_view()(req)\n assert resp.status_code == 200\n\n\nclass TestTransferPage:\n def test_transferview(self):\n req = RequestFactory().get('transfer_view/')\n user = mixer.blend('exchange_backend.user')\n req.user = user\n resp = views.TransferView.as_view()(req)\n assert resp.status_code == 200\n\n def test_transferviewAnonymus(self):\n req = RequestFactory().get('transfer_view/')\n req.user = AnonymousUser()\n resp = views.TransferView.as_view()(req)\n assert resp.status_code == 302\n assert 'login' in resp.url\n\n\nclass TestAddMoneyPage:\n def test_addmoneyview(self):\n req = RequestFactory().get('addmoney/')\n user = mixer.blend('exchange_backend.user')\n req.user = user\n resp = views.AddmoneyView.as_view()(req)\n assert resp.status_code == 200\n\n def test_addmoneyviewAnonymus(self):\n req = RequestFactory().get('addmoney/')\n req.user = AnonymousUser()\n resp = views.AddmoneyView.as_view()(req)\n assert resp.status_code == 302\n assert 'login' in resp.url\n\n\nclass TestWithdrawPage:\n def test_withdrawview(self):\n req = RequestFactory().get('withdraw/')\n user = mixer.blend('exchange_backend.user')\n req.user = user\n resp = WithdrawmoneyView.as_view()(req)\n assert resp.status_code == 200\n\n def test_withdrawAnonymus(self):\n req = RequestFactory().get('withdraw/')\n req.user = AnonymousUser()\n resp = views.WithdrawmoneyView.as_view()(req)\n assert resp.status_code == 302\n assert 'login' in resp.url\n\n\nclass TestProfilePage:\n def test_profileview(self):\n req = RequestFactory().get('profile/')\n user = mixer.blend('exchange_backend.user')\n req.user = user\n resp = ProfileView.as_view()(req)\n assert resp.status_code == 200\n\n def test_profileAnonymus(self):\n req = RequestFactory().get('profile/')\n req.user = AnonymousUser()\n resp = views.ProfileView.as_view()(req)\n assert resp.status_code == 302\n assert 'login' in resp.url\n","repo_name":"ravitejamallozala/currencyexchange","sub_path":"exchange_backend/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9460687445","text":"from Base.base import Base\nfrom Base.page import Page\nimport pytest\nfrom Base.getData import GetData\n\n\ndef get_publish_article_data():\n data_list = []\n data = GetData.get_json_data('publish_audit_article.json')['mp_publish_article']\n data_list.append((data['title'], data['content'], data['channel'], data['exp']))\n return data_list\n\n\n@pytest.mark.run(order=2)\nclass TestPublishArticle:\n def setup_class(self):\n Page.get_mp_home().content_manage()\n Page.get_mp_home().publish_article()\n\n @pytest.mark.parametrize('title,content,channel,exp', get_publish_article_data())\n def test_publish_article(self, title, content, channel, exp):\n Page.get_mp_publish().publish_article(title, content, channel)\n assert Page.get_mp_publish().page_exists_text(exp)\n","repo_name":"a953732125/ui_test_HMTT","sub_path":"Scripts/mp/test_publish_article.py","file_name":"test_publish_article.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19708341883","text":"\"\"\" utility functions for ensembling experiments 
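Provides compute_metric_kg, which scores (head, relation, tail) triples with the
ogbl-biokg Evaluator, plus the RotatE / TransE / ComplEx / DistMult score
functions it dispatches to.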
\"\"\"\n\nfrom collections import defaultdict\n\nfrom ogb.linkproppred import Evaluator\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\ndef compute_metric_kg(ent_emb, rel_emb, triples, method='RotatE', gamma=20.,\n eval_batch_size=64, return_scores=True):\n \"\"\" Compute score for each (head, relation, tail) triple using KG entity\n and relation embeddings. Note that the entity indices in `triples`\n must already have node type offsets added. \"\"\"\n scores = {'metrics' : {'mrr_list' : list(),\n 'hits@1_list' : list(),\n 'hits@3_list' : list(),\n 'hits@10_list' : list()}}\n if return_scores:\n scores['scores'] = {'y_pred_pos' : list(),\n 'y_pred_neg_head' : list(),\n 'y_pred_neg_tail' : list()}\n\n evaluator = Evaluator(name='ogbl-biokg')\n\n head_all, tail_all = triples['head'], triples['tail']\n relation_all = triples['relation']\n head_neg_all, tail_neg_all = triples['head_neg'], triples['tail_neg']\n\n # define score function to use\n if method == 'RotatE':\n scorefn = lambda h, r, t, mode: RotatE(h, r, t, gamma=gamma, mode=mode)\n elif method == 'TransE':\n scorefn = lambda h, r, t, mode: TransE(h, r, t, gamma=gamma, mode=mode)\n elif method == 'ComplEx':\n scorefn = lambda h, r, t, mode: ComplEx(h, r, t, mode=mode)\n elif method == 'DistMult':\n scorefn = lambda h, r, t, mode: DistMult(h, r, t, mode=mode)\n else:\n raise ValueError(f'{args.method} is not a valid method')\n\n # handle head-batch\n loader = DataLoader(torch.arange(len(head_all)), batch_size=eval_batch_size)\n head_cat = torch.cat([head_all.unsqueeze(1), head_neg_all], dim=1)\n for batch in tqdm(loader, desc='head-batch'):\n head_part, tail_part = head_cat[batch], tail_all[batch]\n batch_size, negative_sample_size = head_part.size(0), head_part.size(1)\n\n # [batch_size x negative_sample_size x embedding_dim]\n head = torch.index_select(\n ent_emb, dim=0, index=head_part.view(-1)\n ).view(batch_size, negative_sample_size, -1)\n\n # [batch_size x 1 x embedding_dim]\n relation = torch.index_select(\n rel_emb, dim=0, index=relation_all[batch],\n ).unsqueeze(1)\n\n # [batch_size x 1 x embedding_dim]\n tail = torch.index_select(\n ent_emb, dim=0, index=tail_part,\n ).unsqueeze(1)\n\n score = scorefn(head, relation, tail, 'head-batch')\n batch_results = evaluator.eval({'y_pred_pos': score[:, 0],\n 'y_pred_neg': score[:, 1:]})\n for key in batch_results:\n scores['metrics'][key].append(batch_results[key])\n\n # keep track of scores\n if return_scores:\n scores['scores']['y_pred_pos'].append(score[:, 0])\n scores['scores']['y_pred_neg_head'].append(score[:, 1:])\n\n # handle tail-batch\n loader = DataLoader(torch.arange(len(tail_all)), batch_size=eval_batch_size)\n tail_cat = torch.cat([tail_all.unsqueeze(1), tail_neg_all], dim=1)\n for batch in tqdm(loader, desc='tail-batch'):\n head_part, tail_part = head_all[batch], tail_cat[batch]\n batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)\n\n # [batch_size x 1 x embedding_dim]\n head = torch.index_select(\n ent_emb, dim=0, index=head_part,\n ).unsqueeze(1)\n\n # [batch_size x 1 x embedding_dim]\n relation = torch.index_select(\n rel_emb, dim=0, index=relation_all[batch],\n ).unsqueeze(1)\n\n # [batch_size x negative_sample_size x embedding_dim]\n tail = torch.index_select(\n ent_emb, dim=0, index=tail_part.view(-1),\n ).view(batch_size, negative_sample_size, -1)\n\n score = scorefn(head, relation, tail, 'tail-batch')\n batch_results = evaluator.eval({'y_pred_pos': score[:, 0],\n 'y_pred_neg': score[:, 1:]})\n for key in 
batch_results:\n scores['metrics'][key].append(batch_results[key])\n\n # keep track of scores\n if return_scores:\n scores['scores']['y_pred_neg_tail'].append(score[:, 1:])\n\n for key in scores['metrics']:\n scores['metrics'][key] = torch.cat(scores['metrics'][key]).cpu()\n\n if return_scores:\n for key in scores['scores']:\n scores['scores'][key] = torch.cat(scores['scores'][key]).cpu()\n\n return scores\n\n\ndef RotatE(head, relation, tail, gamma, epsilon=2.0, mode='head-batch'):\n \"\"\" RotatE score function given entity and relation embeddings\n if mode == head-batch:\n head should be size (batch_size, num_neg_samples, 2 * hidden_dim)\n relation should be size (batch_size, 1, hidden_dim),\n tail should be size (batch_size, 1, 2 * hidden_dim)\n elif mode == tail-batch:\n head should be size (batch_size, 1, 2 * hidden_dim)\n relation should be size (batch_size, 1, hidden_dim)\n tail should be size (batch_size, num_neg_samples, 2 * hidden_dim)\n \"\"\"\n pi = 3.14159265358979323846\n hidden_dim = relation.size(2)\n\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n embedding_range = torch.nn.Parameter(\n torch.Tensor([(gamma + epsilon) / hidden_dim]),\n requires_grad=False\n )\n\n phase_relation = relation / (embedding_range.item() / pi)\n\n re_relation = torch.cos(phase_relation)\n im_relation = torch.sin(phase_relation)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n re_score = re_score - re_head\n im_score = im_score - im_head\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n re_score = re_score - re_tail\n im_score = im_score - im_tail\n\n score = torch.stack([re_score, im_score], dim = 0)\n score = score.norm(dim = 0)\n\n score = gamma - score.sum(dim = 2)\n return score\n\n\ndef ComplEx(head, relation, tail, mode):\n \"\"\" ComplEx score function given entity and relation embeddings\n if mode == head-batch:\n head should be size (batch_size, num_neg_samples, 2 * hidden_dim)\n relation should be size (batch_size, 1, hidden_dim),\n tail should be size (batch_size, 1, 2 * hidden_dim)\n elif mode == tail-batch:\n head should be size (batch_size, 1, 2 * hidden_dim)\n relation should be size (batch_size, 1, hidden_dim)\n tail should be size (batch_size, num_neg_samples, 2 * hidden_dim)\n \"\"\"\n re_head, im_head = torch.chunk(head, 2, dim=2)\n re_relation, im_relation = torch.chunk(relation, 2, dim=2)\n re_tail, im_tail = torch.chunk(tail, 2, dim=2)\n\n if mode == 'head-batch':\n re_score = re_relation * re_tail + im_relation * im_tail\n im_score = re_relation * im_tail - im_relation * re_tail\n score = re_head * re_score + im_head * im_score\n else:\n re_score = re_head * re_relation - im_head * im_relation\n im_score = re_head * im_relation + im_head * re_relation\n score = re_score * re_tail + im_score * im_tail\n\n score = score.sum(dim = 2)\n return score\n\n\ndef TransE(head, relation, tail, gamma, mode='head-batch'):\n \"\"\" TransE score function \"\"\"\n if mode == 'head-batch':\n score = head + (relation - tail)\n else:\n score = (head + relation) - tail\n\n score = gamma - torch.norm(score, p=1, dim=2)\n return score\n\n\ndef DistMult(head, relation, tail, mode='head-batch'):\n \"\"\" DistMult score function \"\"\"\n if mode == 'head-batch':\n score = head * (relation * tail)\n else:\n score = (head * relation) * tail\n\n score = score.sum(dim = 
2)\n return score\n\n","repo_name":"rahuln/lm-bio-kgc","sub_path":"script/kge_util.py","file_name":"kge_util.py","file_ext":"py","file_size_in_byte":8050,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"21"} +{"seq_id":"70952327413","text":"from aoe.data.locale import Locale\n\n\nclass BaseCiv:\n def __init__(self, raw):\n self.raw = raw\n self.names = self.raw.base['civ_names']\n self.civ_desc = self.raw.base['civ_helptexts']\n\n # find in locales and return how it named in raw json\n def _raw_name(self, name):\n old_locale = self.raw.locale\n raw_name = None\n\n for locale in Locale().codes():\n self.raw.set_locale(locale)\n code = {v: k for k, v in self.raw.strings.items()}.get(name.capitalize()) # get code if exist\n if code:\n raw_name = {v: k for k, v in self.names.items()}.get(code)\n break\n\n self.raw.set_locale(old_locale)\n return raw_name\n","repo_name":"AoLieGe/AyBot","sub_path":"aoe/parsers/civ/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27924454615","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom hw1_ui import Ui_MainWindow\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport glob\r\nimport os\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication\r\n\r\nclass MainWindow(QMainWindow, Ui_MainWindow):\r\n def __init__(self, parent=None):\r\n super(MainWindow, self).__init__(parent)\r\n self.setupUi(self)\r\n self.onBindingUI()\r\n\r\n def onBindingUI(self):\r\n self.btn1_1.clicked.connect(self.on_btn1_1_click)\r\n self.btn1_2.clicked.connect(self.on_btn1_2_click)\r\n self.btn1_3.clicked.connect(self.on_btn1_3_click)\r\n self.btn1_4.clicked.connect(self.on_btn1_4_click)\r\n self.btn2_1.clicked.connect(self.on_btn2_1_click)\r\n self.btn3_1.clicked.connect(self.on_btn3_1_click)\r\n self.btn3_2.clicked.connect(self.on_btn3_2_click)\r\n self.btn4_1.clicked.connect(self.on_btn4_1_click)\r\n self.btn4_2.clicked.connect(self.on_btn4_2_click)\r\n\r\n def on_btn1_1_click(self):\r\n # termination criteria\r\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((8*11,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d point in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n #read all the images from folder\r\n images= glob.glob('../images/CameraCalibration/*.bmp')\r\n\r\n i=0\r\n for fname in images:\r\n img = cv.imread(fname)\r\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\n\r\n # Find the chess board corners\r\n ret, corners = cv.findChessboardCorners(gray, (11,8),None)\r\n\r\n # If found, add object points, image points (after refining them)\r\n i=i+1\r\n if ret == True:\r\n objpoints.append(objp)\r\n corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n\r\n # Draw and display the corners\r\n img = cv.drawChessboardCorners(img, (11,8), corners2,ret)\r\n\r\n cv.namedWindow(str(i),cv.WINDOW_GUI_NORMAL )\r\n cv.imshow(str(i),img)\r\n cv.waitKey(500)\r\n\r\n cv.destroyAllWindows()\r\n\r\n def on_btn1_2_click(self):\r\n # termination criteria\r\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n 
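        # 88 object points for the 11x8 inner-corner grid, all on the Z=0
        # plane; np.mgrid fills the (x, y) columns and Z is left at zero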
objp = np.zeros((8*11,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d point in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n #read all the images from folder\r\n images= glob.glob('../images/CameraCalibration/*.bmp')\r\n\r\n i=0\r\n for fname in images:\r\n img = cv.imread(fname)\r\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\n\r\n # Find the chess board corners\r\n ret, corners = cv.findChessboardCorners(gray, (11,8),None)\r\n\r\n # If found, add object points, image points (after refining them)\r\n i=i+1\r\n if ret == True:\r\n objpoints.append(objp)\r\n corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\r\n print(mtx)\r\n cv.waitKey(500)\r\n cv.destroyAllWindows()\r\n\r\n def on_btn1_3_click(self):\r\n # get the input from ui item\r\n number = int(self.cboxImgNum.currentText())\r\n\r\n # termination criteria\r\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((8*11,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d point in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n # read images\r\n path = '../images/CameraCalibration/'+ str(number)+'.bmp'\r\n img = cv.imread(path)\r\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\n print(path)\r\n # Find the chess board corners\r\n ret, corners = cv.findChessboardCorners(gray, (11,8),None)\r\n\r\n # If found, add object points, image points (after refining them)\r\n if ret == True:\r\n objpoints.append(objp)\r\n corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\r\n\r\n # get rotation matrix and plus tranalation matrix\r\n R, jacobian = cv.Rodrigues(rvecs[0])\r\n extrinsic = np.hstack((R,tvecs[0]))\r\n print(extrinsic)\r\n\r\n\r\n def on_btn1_4_click(self):\r\n # termination criteria\r\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((8*11,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d point in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n #read all the images from folder\r\n images = glob.glob('../images/CameraCalibration/*.bmp')\r\n for fname in images:\r\n img = cv.imread(fname)\r\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\n\r\n # Find the chess board corners\r\n ret, corners = cv.findChessboardCorners(gray, (11,8),None)\r\n\r\n # If found, add object points, image points (after refining them)\r\n if ret == True:\r\n objpoints.append(objp)\r\n corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\r\n print(dist)\r\n\r\n\r\n def on_btn2_1_click(self):\r\n # termination criteria\r\n criteria = (cv.TERM_CRITERIA_EPS + 
cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((8*11,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d point in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n #read all the images from folder\r\n images = glob.glob('../images/CameraCalibration/*.bmp')\r\n\r\n for fname in images:\r\n img = cv.imread(fname)\r\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\n\r\n # Find the chess board corners\r\n ret, corners = cv.findChessboardCorners(gray, (11,8),None)\r\n\r\n # If found, add object points, image points (after refining them)\r\n if ret == True:\r\n objpoints.append(objp)\r\n\r\n corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n imgpoints.append(corners2)\r\n\r\n ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\r\n\r\n # Function to draw the axis\r\n def draw(img, corners, imgpts):\r\n imgpts = np.int32(imgpts).reshape(-1,2)\r\n\r\n # draw ground floor in green\r\n img = cv.drawContours(img, [imgpts[:4]],-1,(0,0,255),10)\r\n\r\n # draw pillars in blue color\r\n for i,j in zip(range(4),range(4,8)):\r\n img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(0,0,255),10)\r\n\r\n # draw top layer in red color\r\n img = cv.drawContours(img, [imgpts[4:]],-1,(0,0,255),10)\r\n return img\r\n\r\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\r\n objp = np.zeros((8*11,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:11,0:8].T.reshape(-1,2)\r\n axis = np.float32([[0,0,0], [0,2,0], [2,2,0], [2,0,0],[0,0,-2],[0,2,-2],[2,2,-2],[2,0,-2] ])\r\n\r\n # declare a array to store video frame\r\n Video_img=[]\r\n for fname in glob.glob('../images/Augment/*.bmp'):\r\n img = cv.imread(fname)\r\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\n ret, corners = cv.findChessboardCorners(gray, (11,8),None)\r\n\r\n if ret == True:\r\n corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\r\n\r\n # Find the rotation and translation vectors.\r\n _,rvecs, tvecs, inliers = cv.solvePnPRansac(objp, corners2, mtx, dist)\r\n\r\n # project 3D points to image plane\r\n imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)\r\n\r\n img = draw(img,corners2,imgpts)\r\n Video_img.append(img)\r\n cv.imshow('Video',img)\r\n cv.waitKey(500)\r\n\r\n # making vidoe\r\n height,width,layers=Video_img[1].shape\r\n video=cv.VideoWriter('video.mp4',-1,2,(width,height))\r\n for j in range(0,5):\r\n video.write(Video_img[j])\r\n\r\n cv.destroyAllWindows()\r\n\r\n def on_btn3_1_click(self):\r\n # read the image\r\n img = cv.imread('../images/OriginalTransform.png')\r\n\r\n # read the transform data from ui\r\n edtAngle = float(self.edtAngle.text())\r\n edtScale = float(self.edtScale.text())\r\n edtTx = float(self.edtTx.text())\r\n edtTy = float(self.edtTy.text())\r\n\r\n # making translate matrix\r\n H = np.float32([[1,0,edtTx],[0,1,edtTy]])\r\n\r\n # translate the small squared image\r\n rows,cols = img.shape[:2]\r\n Translate_img = cv.warpAffine(img,H,(rows,cols))\r\n\r\n # making rotate and scale matrix\r\n rows,cols = Translate_img.shape[:2]\r\n M = cv.getRotationMatrix2D((130+edtTy,125+edtTy),edtAngle,edtScale)\r\n\r\n #rotate and scale the small squared image\r\n result = cv.warpAffine(Translate_img,M,(rows,cols))\r\n\r\n #show the result\r\n cv.imshow('Original Image', img)\r\n cv.imshow('Rotation + 
Translate + Scale Imag',result)\r\n\r\n def on_btn3_2_click(self):\r\n # declare 2 point array\r\n pts1=[]\r\n pts2 = np.float32([[20,20],[450,20],[450,450],[20,450]])\r\n\r\n def CallBack(event,x,y,flags,param):\r\n\r\n # if clicked doing the following things\r\n if event == cv.EVENT_LBUTTONDOWN:\r\n nonlocal pts1\r\n pts1.append([x,y])\r\n\r\n # if clicked th images for four times, then wraping the origin to the perspective image\r\n if len(pts1)==4:\r\n pts1 = np.float32(pts1)\r\n M = cv.getPerspectiveTransform(pts1,pts2)\r\n dst = cv.warpPerspective(img,M,(450,450))\r\n print(pts1)\r\n cv.imshow('Perspective Result Image', dst)\r\n\r\n img = cv.imread('../images/OriginalPerspective.png')\r\n cv.namedWindow('origin')\r\n cv.imshow('origin',img)\r\n\r\n # add `setMouseCallback()` to the window\r\n cv.setMouseCallback('origin',CallBack)\r\n\r\n def on_btn4_1_click(self):\r\n # read left and right images\r\n imgL = cv.imread('../images/imL.png',0)\r\n imgR = cv.imread('../images/imR.png',0)\r\n\r\n # making disparity map\r\n stereo = cv.StereoSGBM_create(numDisparities=48, blockSize=3) #the third parameter\r\n disparity = stereo.compute(imgL,imgR)\r\n\r\n # normalization\r\n normalized_img = np.zeros((800, 800))\r\n normalized_img = cv.normalize(disparity, normalized_img, 0, 255, cv.NORM_MINMAX,cv.CV_8U)\r\n\r\n cv.imshow('Without L-R Disparity Check',normalized_img)\r\n\r\n\r\n def on_btn4_2_click(self):\r\n # read left and right images\r\n imgL = cv.imread('../images/imL.png',0)\r\n imgR = cv.imread('../images/imR.png',0)\r\n\r\n # making disparity map without checked\r\n stereo = cv.StereoSGBM_create(numDisparities=48, blockSize=3, disp12MaxDiff=0) #the third parameter\r\n disparity = stereo.compute(imgL,imgR)\r\n\r\n # making disparity map with checked\r\n stereo_checked = cv.StereoSGBM_create(numDisparities=48, blockSize=3, disp12MaxDiff=2)\r\n disparity_checked = stereo_checked.compute(imgL, imgR)\r\n\r\n # normalization\r\n normalized_img = np.zeros((800, 800))\r\n normalized_img = cv.normalize(disparity, normalized_img, 0, 255, cv.NORM_MINMAX,cv.CV_8U)\r\n cv.imshow('Without the left-right disparity check',normalized_img)\r\n\r\n normalized_checked = np.zeros((800, 800))\r\n normalized_checked = cv.normalize(disparity_checked, normalized_checked, 0, 255, cv.NORM_MINMAX,cv.CV_8U)\r\n cv.imshow('With the left-right disparity check',normalized_checked)\r\n\r\n # count the difference\r\n diff = cv.absdiff(normalized_img, normalized_checked)\r\n (x,y) = np.where(diff>0)\r\n\r\n diff_img = cv.cvtColor(normalized_checked,cv.COLOR_GRAY2RGB)\r\n\r\n for i in range(len(x)):\r\n diff_img[x[i],y[i]] = (0,0,255)\r\n cv.imshow('Mark The Diff',diff_img)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n window = MainWindow()\r\n window.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"ya-sin/cv_hw1","sub_path":"CvHw1_Python/hw1_example.py","file_name":"hw1_example.py","file_ext":"py","file_size_in_byte":13867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4582366979","text":"\nfrom collections import namedtuple\n\n\nclass Card:\n \"\"\"Represents a playing card.\"\"\"\n\n __slots__ = [\"_suit\", \"_rank_name\", \"_rank_value\"]\n\n def __init__(self, suit, rank_name, rank_value):\n self._suit = suit\n self._rank_name = rank_name\n self._rank_value = rank_value\n\n @property\n def rank_value(self):\n return self._rank_value\n\n def __repr__(self):\n return f\"{self._rank_name} of {self._suit}\"\n\n\nRank = 
namedtuple(\"Rank\", \"name value\")\n\n\nclass Deck:\n \"\"\"Represents a deck of cards.\"\"\"\n\n __slots__ = [\"_cards\"]\n\n _SUITS = (\"Hearts\", \"Diamonds\", \"Clubs\", \"Spades\")\n _RANKS = (\n Rank(name=\"Ace\", value=1),\n Rank(name=\"Two\", value=2),\n Rank(name=\"Three\", value=3),\n Rank(name=\"Four\", value=4),\n Rank(name=\"Five\", value=5),\n Rank(name=\"Six\", value=6),\n Rank(name=\"Seven\", value=7),\n Rank(name=\"Eight\", value=8),\n Rank(name=\"Nine\", value=9),\n Rank(name=\"Ten\", value=10),\n Rank(name=\"Jack\", value=11),\n Rank(name=\"Queen\", value=12),\n Rank(name=\"King\", value=13)\n )\n\n def __init__(self):\n self._cards = [Card(suit, rank.name, rank.value)\n for suit in self._SUITS\n for rank in self._RANKS]\n\n @property\n def cards(self):\n return self._cards\n\n @classmethod\n def stripped_deck(cls, *args):\n \"\"\"\n Class method to create a deck\n with the specified cards removed.\n \"\"\"\n deck = cls()\n deck._cards = [card for card in deck._cards\n if card.rank_value not in args]\n return deck\n\n @classmethod\n def piquet_deck(cls):\n \"\"\"\n Class method to create a piquet deck.\n \"\"\"\n return cls.stripped_deck(2, 3, 4, 5, 6)\n\n\ndeck = Deck()\nprint(deck.cards)\n\n# Get a stripped deck without 2s, 3s, 4s, 5s, and 6s\ndeck2 = Deck.stripped_deck(2, 3, 4, 5, 6)\nprint(deck2.cards)\n\n# Get a piquet deck\ndeck3 = Deck.piquet_deck()\nprint(deck3.cards)\n","repo_name":"Archive-42/nov-static","sub_path":"08-my-website/06-module-6/week-18/unsorted/W18D3_lectures/more-about-classes/version_03.py","file_name":"version_03.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"2966564592","text":"from __future__ import division, print_function\n\nimport os\nimport detect\nimport tensorflow as tf\nfrom tensorflow.python.saved_model import tag_constants\n\n# Flask utils\nfrom flask import Flask, redirect, url_for, request, render_template\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\n# Define a flask app\napp = Flask(__name__)\nload_model = \"./checkpoints/yolov4-416\"\nsaved_model_loaded = tf.saved_model.load(load_model, tags=[tag_constants.SERVING])\nprint('Model loaded. Check http://127.0.0.1:5000/')\n\n\n@app.route('/', methods=['GET'])\ndef index():\n # Main page\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n f = request.files['file']\n\n # Save the file to ./uploads\n basepath = os.path.dirname(__file__)\n file_path = os.path.join(\n basepath, 'uploads', secure_filename(f.filename))\n f.save(file_path)\n\n # Make prediction\n #file_path\n get_detected_object = detect.glass_detector(file_path, saved_model_loaded)\n #pil image to base 64\n return get_detected_object\n return None\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"ZocP/YOLO4-TF","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22635979086","text":"#! 
/usr/bin/python\n\nfrom __future__ import division\nimport matplotlib.pyplot as plt\n\n# j= 0 1 2 3 \nX = [1, 2, 6, 11, 15, 18]\nY = [1, 10, 3, 7, 4, 11]\noX = [0, 20]\noY = [0, 15]\n\ndef interpolate(X, Y):\n #step 1\n A = Y[:]\n #step 3\n H = []\n for i in range(0, len(X) - 1):\n H.append(X[i+1] -X[i])\n #step 4\n F = []\n F.append((3/H[0])*(A[1]-A[0]))\n for i in range(1, len(A) - 1):\n val = (3/H[i])*(A[i+1]-A[i])-(3/H[i-1])*(A[i]-A[i-1])\n F.append(val)\n #step 5\n L = []\n M = []\n Z = []\n\n #step 6\n L.append(1)\n M.append(0)\n Z.append(0)\n\n #step 7\n\n for i in range(1, len(X)-1):\n val = 2*(X[i+1]-X[i-1])-(H[i-1]*M[i-1])\n L.append(val)\n val = H[i]/L[i]\n M.append(val)\n val = (F[i]-(H[i-1]*Z[i-1]))/L[i]\n Z.append(val)\n\n #step 8\n L.append(1)\n Z.append(0)\n\n #my step\n B = []\n C = []\n D = []\n for i in range(0, len(X)):\n B.append(0)\n C.append(0)\n D.append(0)\n #step 9\n for i in reversed(range(0,len(X) -1)):\n C[i] = Z[i] - M[i]*C[i+1]\n B[i] = ((A[i+1] - A[i])/H[i])-(H[i]*(C[i+1]+2*C[i])/3)\n D[i] = (C[i+1]-C[i])/(3*H[i])\n result = []\n for i in range(0, len(X)-1):\n result.append((A[i], B[i], C[i], D[i], X[i]))\n return result\n\n\n\ndef calculate(x, f):\n a,b,c,d,x0 = f\n return a + b*(x-x0) + c*(x-x0)*(x-x0) + d*(x-x0)*(x-x0)*(x-x0)\n\ndef main():\n fn = interpolate(X, Y)\n R = [i/10 for i in range(X[0]*10, X[len(X)-1]*10)]\n V = []\n for i in R:\n for j in range(0, len(X)-1):\n if (i < X[j+1]):\n V.append(calculate(i,fn[j]))\n break\n\n print(len(R))\n print(len(V))\n\n draw(X, Y, R, V, oX, oY)\n\n\ndef draw(pointsX, pointsY, X, Y, oX, oY):\n plt.figure(1)\n plt.subplot(311)\n plt.plot(pointsX, pointsY, 'bo')\n plt.axis(oX + oY)\n\n plt.subplot(312)\n plt.plot(X, Y, 'k')\n plt.axis(oX + oY)\n\n plt.subplot(313)\n plt.plot(pointsX , pointsY, 'bo', X, Y, 'k')\n plt.axis(oX + oY)\n\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"tomaszkrysiuk/spline","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9025074048","text":"def exists(ls,x,y):\n path_exists=False\n if y!=0:\n try:\n if ls[y-1][x]==0:\n path_exists=True\n except:\n pass\n \n if y!=len(ls)-1:\n try:\n if ls[y+1][x]==0:\n path_exists=True\n except:\n pass\n \n if x!=0:\n try:\n if ls[y][x-1]==0:\n path_exists=True\n except:\n pass\n if x!=len(ls[y])-1:\n try:\n if ls[y][x+1]==0:\n path_exists=True\n except:\n pass\n \n return path_exists\n \n\n\n\ndef vert_up(ls,x,y):\n try:\n if y==0:\n return {\"status\":False,\"x\":None,\"y\":None}\n elif ls[y][x]==1:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n val=ls[y][x]\n if val==0:\n if ls[y-1][x]==0:\n return {\"status\":True,\"x\":x,\"y\":y-1}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n except:\n return {\"status\":False,\"x\":None,\"y\":None}\n\n \n \n \ndef vert_down(ls,x,y):\n try:\n if y==len(ls)-1:\n return {\"status\":False,\"x\":None,\"y\":None}\n elif ls[y][x]==1:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n val=ls[y][x]\n if val==0:\n if ls[y+1][x]==0:\n return {\"status\":True,\"x\":x,\"y\":y+1}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n except:\n return {\"status\":False,\"x\":None,\"y\":None}\n \n \n \ndef hort_left(ls,x,y):\n try:\n if x==0:\n return {\"status\":False,\"x\":None,\"y\":None}\n elif 
ls[y][x]==1:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n val=ls[y][x]\n if val==0:\n if ls[y][x-1]==0:\n return {\"status\":True,\"x\":x-1,\"y\":y}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n except:\n return {\"status\":False,\"x\":None,\"y\":None}\n \n \n \ndef hort_right(ls,x,y):\n try:\n if x==len(ls[y])-1:\n return {\"status\":False,\"x\":None,\"y\":None}\n elif ls[y][x]==1:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n val=ls[y][x]\n if val==0:\n if ls[y][x+1]==0:\n return {\"status\":True,\"x\":x+1,\"y\":y}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n else:\n return {\"status\":False,\"x\":None,\"y\":None}\n except:\n return {\"status\":False,\"x\":None,\"y\":None}\n\n \n \n \ndef get_all_possible(ls):\n d={}\n for y in range(0,len(ls)):\n for x in range(0,len(ls[y])):\n d[str(x)+\",\"+str(y)]=[]\n for y in range(0,len(ls)):\n for x in range(0,len(ls[y])):\n try:\n vu=vert_up(ls,x,y)\n if vu[\"status\"]==True:\n tmp=d[str(x)+\",\"+str(y)]\n tmp.append(vu)\n d[str(x)+\",\"+str(y)]=tmp\n except:\n pass\n try:\n vd=vert_down(ls,x,y)\n if vd[\"status\"]==True:\n tmp=d[str(x)+\",\"+str(y)]\n tmp.append(vd)\n d[str(x)+\",\"+str(y)]=tmp\n except:\n pass\n try:\n hl=hort_left(ls,x,y)\n if hl[\"status\"]==True:\n tmp=d[str(x)+\",\"+str(y)]\n tmp.append(hl)\n d[str(x)+\",\"+str(y)]=tmp\n except:\n pass\n try:\n hr=hort_right(ls,x,y)\n if hr[\"status\"]==True:\n tmp=d[str(x)+\",\"+str(y)]\n tmp.append(hr)\n d[str(x)+\",\"+str(y)]=tmp\n\n except:\n pass\n return d\n\n\n\n\n\n\n\n\n\n\n\n\ndef get_min_path(ls):\n import copy\n d=get_all_possible(ls)\n n=1\n found=False\n d1=copy.deepcopy(d)\n next_moves=d1[\"0\"+\",\"+\"0\"]\n current_x=0\n current_y=0\n visited=[]\n visited.append({\"status\":True,\"x\":0,\"y\":0})\n while found==False and next_moves!=[]:\n \n# print(next_moves)\n n+=1\n temp_next=[]\n temp_iter=copy.deepcopy(next_moves)\n for x in temp_iter:\n to_check={\"status\":True,\"x\":len(ls[-1])-1,\"y\":len(ls)-1}\n if to_check in next_moves:\n found=True\n return n\n current_x=x[\"x\"]\n current_y=x[\"y\"]\n# print(current_x,current_y)\n# print(\"\")\n visited.append({\"status\":True,\"x\":current_x,\"y\":current_y})\n tmp=d[str(current_x)+\",\"+str(current_y)]\n for t in tmp:\n if t not in visited:\n# if to_check in next_moves:\n# found=True\n# return n+1\n temp_next.append(t)\n \n #temp_next=[z for z in temp_next if z not in visited]\n next_moves=temp_next\n# to_check={\"status\":True,\"x\":len(ls[-1])-1,\"y\":len(ls)-1}\n# print(\"to check: \",to_check,\" in \",next_moves)\n# if to_check in next_moves:\n# found=True\n# return n+1\n if next_moves==[]:\n# print(\"Unsolvable\")\n return 9999\n\n\ndef get_min_path_2(ls):\n d=get_all_possible(ls)\n found=False\n visited=[]\n n=1\n visited.append((0,0))\n next_move=[(x[\"x\"],x[\"y\"]) for x in d[\"0\"+\",\"+\"0\"]]\n while found==False:\n# print(next_move)\n tmp_next=[]\n n+=1\n\n for one in next_move:\n next_x,next_y=one\n tmp=[(x[\"x\"],x[\"y\"]) for x in d[str(next_x)+\",\"+str(next_y)]]\n tmp_next=tmp_next+tmp\n visited.append((next_x,next_y))\n next_move=tmp_next\n if (len(ls[-1])-1,len(ls)-1) in next_move:\n# print(\"Found true,\",n)\n return n+1\n\n next_move=[x for x in next_move if x not in visited]\n if next_move==[]:\n# print(\"Unsolvable\")\n return 9999\n\n\n\n\n\n\n\n\n\n\n\ndef solution(ls):\n# max_len=0\n# lss=[]\n# for x in ls:\n# if len(x)>max_len:\n# max_len=len(x)\n\n# for x in range(0,len(ls)):\n# 
if len(ls[x])!=max_len:\n# tmp=ls[x]+[2 for z in range(1,(max_len-len(ls[x]))+1)]\n# lss.append(tmp)\n# else:\n# lss.append(ls[x])\n# ls=lss\n if len(ls)==1:\n if len(ls[0])==1:\n return 1\n import copy\n minimum=get_min_path(ls)\n for x in range(0,len(ls)):\n for y in range(0,len(ls[x])):\n# pass\n if ls[x][y]==1:\n #if exists(ls,x,y)==True:\n iter_list = copy.deepcopy(ls)\n iter_list[x][y]=0\n try_minimum=get_min_path(iter_list)\n if try_minimum Dict:\n response = self._callGETApi(\n '/workflow/state-info', {'process_id': processId})\n if response.code == 200:\n return response.body['data']\n return {}\n\n def deleteProcess(self, id: str) -> bool:\n response = self._callDELETEApi(\n '/workflow/delete', {'id': id})\n if response.code == 200:\n return True\n return False\n\n def processInfo(self, processId: str) -> Dict:\n response = self._callGETApi(\n '/workflow/process-info', {'process_id': processId})\n if response.code == 200:\n return response.body['data']\n return {}\n\n def workerInfo(self, workerId: str) -> Dict:\n response = self._callGETApi(\n '/worker/info', {'id': workerId})\n if response.code == 200:\n return response.body['data']\n return {}\n\n def get_fields(self, workflow_name: str) -> Dict:\n response = self._callGETApi(\n '/workflow/fields', {'workflow_name': workflow_name})\n if response.code == 200:\n return response.body['data']\n return {}\n\n def filter(self, workflows_name: list[str] = [], processes_id: list[str] = [],\n filter_finished_processes: str = \"false\", state: str = None, with_fields: str = \"false\",\n owner_id: int = 0, page_size: int = 500, page: int = 1) -> [Dict, Dict]:\n response = self._callPOSTApi('/workflow/filter', {'workflows': workflows_name,\n 'processes': processes_id,\n 'filter_finished_processes': filter_finished_processes,\n 'state': state,\n 'with_fields': with_fields,\n 'owner_id': owner_id,\n \"page_size\": page_size,\n \"page\": page})\n if response.code == 200:\n return response.body['data'], response.body['pagination']\n return [{}, {}]\n\n def processAction(self, processId: str, state_action: str, message: str = None, fields: Dict = {}) -> Tuple[\n Dict, str]:\n \"\"\"call short action with no send files\n\n Args:\n processId (str): _description_\n state_action (str): _description_\n message (str, optional): _description_. Defaults to None.\n fields (Dict, optional): _description_. Defaults to {}.\n\n Returns:\n Dict: _description_\n \"\"\"\n data = {\n 'process_id': processId,\n 'state_action': state_action,\n }\n if message is not None:\n data['message'] = message\n # =>add fields\n req_fields = {}\n for key, value in fields.items():\n req_fields['field.' 
+ key] = value\n data['fields'] = req_fields\n response = self._callPOSTApi(\n '/workflow/short-action', data)\n if response.code == 200:\n return (response.body['data'], None)\n return (None, response.body)\n","repo_name":"workflow-engine-service/cli","sub_path":"data/interfaces/python3/lib/apis/user_api.py","file_name":"user_api.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74303727412","text":"\"\"\"\nクライアントを実装する上でのテンプレート\n基本的にはクライアント側の対話システムの出力をdataに格納さえしていればOK\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport pickle\nimport socket\nimport sys\n\n\nlogging.basicConfig(\n format='%(asctime)s #%(lineno)s %(levelname)s %(name)s ::: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n stream=sys.stdout,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_client_response(server_context):\n return f\"client (model): オウム:{server_context}\"\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\"--port\", default=50000, type=int, help=\"port number\")\n parser.add_argument(\"--start_utter\", default=\"こんにちは\", type=str, help=\"start utterance\")\n args = parser.parse_args()\n\n context_list, prob_list = [], []\n\n host = socket.gethostbyname(socket.gethostname())\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # connect server\n client.connect((host, args.port))\n logger.info(\"connect server!\")\n\n while True:\n server_context = client.recv(4096)\n if not server_context:\n break\n\n # received response\n server_context = pickle.loads(server_context)\n\n # 第二話者の場合,最初の発話を対話履歴に追加しておく\n if server_context[\"response\"] != args.start_utter:\n context_list.append(args.start_utter)\n prob_list.append(0.)\n\n logger.info(\"\\033[32m\" + \"receive ... \" + json.dumps(server_context, ensure_ascii=False) + \"\\033[0m\")\n context_list.append(server_context[\"response\"])\n prob_list.append(server_context[\"prob\"])\n\n model_response = create_client_response(context_list[-1])\n send_data = {\"response\": model_response, \"prob\": 1.0}\n logger.info(\"\\033[34m\" + \"send ... 
\" + json.dumps(send_data, ensure_ascii=False) + \"\\033[0m\")\n context_list.append(send_data[\"response\"])\n prob_list.append(send_data[\"prob\"])\n \n client.send(pickle.dumps(send_data))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cl-tohoku/aobav2","sub_path":"aoba/work/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8413023812","text":"from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_3\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import ether_types\nfrom ryu.lib.packet import ipv4\nfrom ryu.lib.packet import tcp\n\nfrom scada_log.write_log_txt import write_log\nfrom scada_log.epoch_to_datetime import epoch_to_datetime\nfrom ryu.lib.packet import modbus_tcp\n\nclass SimpleSwitch13(app_manager.RyuApp):\n OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch13, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n self.modbus_tcp_information=[]\n self.write_log_object=write_log()\n self.write_log_object.delete_old_log_file()\n self.datetime_object=epoch_to_datetime()\n\n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) #程式開始執行時,會先到這裡針對OvS設定相關的資訊\n # set_ev_cls(ev_cls, dispatchers=None): 事件接收\n # set_ev_cls 為 是一個用於將方法註冊成 Ryu 事件處理器的一個修飾器,被修飾的 方法將會成為一個事件處理器。\n # dispatchers 為 該事件處理器將會在哪些談判階段(negotiation phases) 去接收此一類型的事件。\n def switch_features_handler(self, ev):# 一開始 Switch 連上 Controller 時的初始設定 Function\n datapath = ev.msg.datapath # 接收 OpenFlow 交換器實例\n ofproto = datapath.ofproto # OpenFlow 交換器使用的 OpenFlow 協定版本\n parser = datapath.ofproto_parser # 處理 OpenFlow 協定的 parser(解析)\n\n # install table-miss flow entry\n #\n # We specify NO BUFFER to max_len of the output action due to\n # OVS bug. At this moment, if we specify a lesser number, e.g.,\n # 128, OVS will send Packet-In with invalid buffer_id and\n # truncated packet data. In that case, we cannot output packets\n # correctly. 
The bug has been fixed in OVS v2.1.0.\n\n # 首先新增一個空的 match,也就是能夠 match 任何封包的 match rule\n match = parser.OFPMatch()\n # 指定這一條 Table-Miss FlowEntry 的對應行為\n # 把所有不知道如何處理的封包都送到 Controller\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n # 把 Table-Miss FlowEntry 設定至 Switch,並指定優先權為 0 (最低)\n self.add_flow(datapath, 0, match, actions)\n #---------新增flow到ovs上----------\n def add_flow(self, datapath, priority, match, actions, buffer_id=None):\n # 取得與 Switch 使用的 IF 版本 對應的 OF 協定及 parser\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n # Instructions 是定義當封包滿足 match 時,所要執行的動作\n # 因此把 action 以 OFPInstructionActions 包裝起來\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if buffer_id:\n mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,\n priority=priority, match=match,\n instructions=inst)\n else:\n # FlowMod Function 可以讓我們對 Switch 寫入由我們所定義的 Flow Entry\n mod = parser.OFPFlowMod(datapath=datapath, priority=priority,\n match=match, instructions=inst)\n datapath.send_msg(mod)\n # 處理ovs傳送過來的封包\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n # If you hit this you might want to increase\n # the \"miss_send_length\" of your switch\n if ev.msg.msg_len < ev.msg.total_len:\n self.logger.debug(\"packet truncated: only %s of %s bytes\",\n ev.msg.msg_len, ev.msg.total_len)\n msg = ev.msg\n # self.write_log_object.write_log_txt_2(\"ev.msg=\"+str(msg))\n print(\"-----------------------\")\n self.packet_timestamp=getattr(ev, 'timestamp', None) #取得 ev 裡面的 timestamp 。getattr 『取得』class 內定義變數的值\n self.packet_datetime=self.datetime_object.get_datetime(self.packet_timestamp)\n datapath = msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n in_port = msg.match['in_port']\n print('in_port='+str(in_port))\n\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocols(ethernet.ethernet)[0]\n\n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n dst = eth.dst\n src = eth.src\n\n if self.pkt.get_protocols(ipv4.ipv4):\n self.ipv4=self.pkt.get_protocols(ipv4.ipv4)[0]\n self.ipv4_src = self.ipv4.src\n self.ipv4_dst = self.ipv4.dst\n self.ipv4_protocol=self.ipv4.proto\n self.ipv4_services=self.ipv4.tos\n if self.pkt.get_protocols(tcp.tcp):\n self.tcp=self.pkt.get_protocols(tcp.tcp)[0]\n self.tcp_src_port=self.tcp.src_port\n self.tcp_dst_port=self.tcp.dst_port\n self.tcp_seq_number=self.tcp.seq\n self.tcp_ack=self.tcp.ack\n self.tcp_flags=self.tcp.bits\n #----- 顯示ipv4與tcp的內容\n if self.pkt.get_protocols(ipv4.ipv4):\n print(\"ipv4_src=\"+str(self.ipv4_src))\n print(\"ipv4_dst=\"+str(self.ipv4_dst))\n if self.pkt.get_protocols(tcp.tcp):\n print(\"tcp=\"+str(self.tcp))\n print(\"tcp_seq_number=\"+str(self.tcp_seq_number))\n\n #---------將資料寫到log檔----------------\n if self.pkt.get_protocols(tcp.tcp):\n self.write_log_object.write_log_txt(\"-----------------\")\n self.write_log_object.write_log_txt(\"ev=\"+str(ev))\n self.write_log_object.write_log_txt(\"ev.msg=\"+str(ev.msg))\n self.write_log_object.write_log_txt(\"packet_timestamp=\"+str(self.packet_timestamp))\n self.write_log_object.write_log_txt(\"packet_datetime=\"+str(self.packet_datetime))\n self.write_log_object.write_log_txt(\"ev.msg.data=\"+str(self.data))\n self.write_log_object.write_log_txt(\"datapath=\"+str(self.datapath))\n self.write_log_object.write_log_txt(\"parser=\"+str(self.parser))\n self.write_log_object.write_log_txt(\"in_port=\"+str(self.in_port))\n 
self.write_log_object.write_log_txt(\"pkt=\"+str(self.pkt))\n # self.write_log_object.write_log_txt(\"pkt_len=\"+str(self.pkt.__len__()))\n self.write_log_object.write_log_txt(\"eth=\"+str(self.eth))\n self.write_log_object.write_log_txt(\"dst=\"+str(self.dst))\n self.write_log_object.write_log_txt(\"src=\"+str(self.src))\n # if self.pkt.get_protocols(ipv4.ipv4):\n self.write_log_object.write_log_txt(\"ipv4=\"+str(self.ipv4))\n self.write_log_object.write_log_txt(\"ipv4.src=\"+str(self.ipv4_src))\n self.write_log_object.write_log_txt(\"ipv4.dst=\"+str(self.ipv4_dst))\n # if self.pkt.get_protocols(tcp.tcp):\n self.write_log_object.write_log_txt(\"tcp=\"+str(self.tcp))\n self.write_log_object.write_log_txt(\"tcp_src_port=\"+str(self.tcp_src_port))\n self.write_log_object.write_log_txt(\"tcp_dst_port=\"+str(self.tcp_dst_port))\n self.write_log_object.write_log_txt(\"tcp_seq_number=\"+str(self.tcp_seq_number))\n \n #----------------- 儲存 TCP連線狀態時間------------------------------------------------------#\n if self.tcp_dst_port==502 and self.tcp.has_flags(tcp.TCP_SYN): #modbus_tcp 建立連線\n self.temp_date_list={}\n self.temp_date_list['ipv4_src']=self.ipv4_src\n self.temp_date_list['ipv4_dst']=self.ipv4_dst\n self.temp_date_list['src_port']=self.tcp_src_port\n self.temp_date_list['dst_port']=self.tcp_dst_port\n self.temp_date_list['tcp_connection_time']=[]\n self.temp_tcp_connection_time={}\n self.temp_tcp_connection_time_array=[]\n if len(self.modbus_tcp_information)>0:\n for i in range(len(self.modbus_tcp_information)):\n if self.modbus_tcp_information[i]['ipv4_src']==self.ipv4_src and self.modbus_tcp_information[i]['ipv4_dst']==self.ipv4_dst and len(self.modbus_tcp_information[i]['tcp_connection_time'])>0:\n self.temp_tcp_connection_time={}\n self.temp_tcp_connection_time_array=[]\n self.temp_tcp_connection_time['tcp_syn_time']=self.packet_timestamp\n self.temp_tcp_connection_time_array.append(self.temp_tcp_connection_time)\n self.modbus_tcp_information[i]['tcp_connection_time'].append(self.temp_tcp_connection_time_array)\n else:\n self.temp_tcp_connection_time['tcp_syn_time']=self.packet_timestamp\n self.temp_tcp_connection_time_array.append(self.temp_tcp_connection_time)\n self.temp_date_list['tcp_connection_time'].append(self.temp_tcp_connection_time_array)\n self.modbus_tcp_information.append(self.temp_date_list)\n else:\n self.temp_tcp_connection_time['tcp_syn_time']=self.packet_timestamp\n self.temp_tcp_connection_time_array.append(self.temp_tcp_connection_time)\n self.temp_date_list['tcp_connection_time'].append(self.temp_tcp_connection_time_array)\n self.modbus_tcp_information.append(self.temp_date_list)\n print('packet_is:SYN')\n self.write_log_object.write_log_txt('packet_is:SYN')\n print('self.modbus_tcp_information='+str(self.modbus_tcp_information))\n self.write_log_object.write_log_txt('self.modbus_tcp_information='+str(self.modbus_tcp_information))\n # self.tcp.\n if self.tcp_src_port==502 and self.tcp.has_flags(tcp.TCP_FIN,tcp.TCP_ACK): #modbus_tcp 斷線\n if len(self.modbus_tcp_information)>0:\n for i in range(len(self.modbus_tcp_information)):\n if self.modbus_tcp_information[i]['ipv4_src']==self.ipv4_dst and self.modbus_tcp_information[i]['ipv4_dst']==self.ipv4_src and len(self.modbus_tcp_information[i]['tcp_connection_time'])>0:\n for j in range(len(self.modbus_tcp_information[i]['tcp_connection_time'])):\n if len(self.modbus_tcp_information[i]['tcp_connection_time'][j][0])<2:\n self.modbus_tcp_information[i]['tcp_connection_time'][j][0]['tcp_fin_time']=self.packet_timestamp 
#放入結束時間\n #計算 duration_time\n self.modbus_tcp_information[i]['tcp_connection_time'][j][0]['duration_time']=self.modbus_tcp_information[i]['tcp_connection_time'][j][0]['tcp_fin_time']-self.modbus_tcp_information[i]['tcp_connection_time'][j][0]['tcp_syn_time']\n print('packet_is:FIN')\n self.write_log_object.write_log_txt('packet_is:FIN')\n print('self.modbus_tcp_information='+str(self.modbus_tcp_information))\n self.write_log_object.write_log_txt('self.modbus_tcp_information='+str(self.modbus_tcp_information))\n\n #------------------------------------解析Modbus TCP(應用層)的部分 ------------------------------------#\n if self.pkt.__len__()==4 and (self.tcp_src_port==502 or self.tcp_dst_port==502):\n self.write_log_object.write_log_txt(\"---------modbus tcp-------------\")\n #錄製modbus tcp 的封包\n self.packet_save_object.write_packet_timestamp_to_txt(self.packet_timestamp)\n self.packet_save_object.write_packet_to_txt(self.pkt)\n \n print(\"__iter__=\"+str(self.pkt.__iter__()))\n print(\"__len__=\"+str(self.pkt.__len__()))\n print(\"__getitem__=\"+str(self.pkt.__getitem__(3)))\n self.write_log_object.write_log_txt(\"__iter__=\"+str(self.pkt.__iter__()))\n self.write_log_object.write_log_txt(\"__len__=\"+str(self.pkt.__len__()))\n self.write_log_object.write_log_txt(\"__getitem__=\"+str(self.pkt.__getitem__(3)))\n \n mb=modbus_tcp.modbus_tcp()\n mb.get_modbus_tcp(self.tcp_src_port,self.tcp_dst_port,self.pkt.__getitem__(3))\n self.write_log_object.write_log_txt(\"****************\")\n self.write_log_object.write_log_txt(\"mb.t_id=\"+str(mb.t_id))\n self.write_log_object.write_log_txt(\"mb.p_id=\"+str(mb.p_id))\n self.write_log_object.write_log_txt(\"mb.modbus_len=\"+str(mb.modbus_len))\n self.write_log_object.write_log_txt(\"mb.u_id=\"+str(mb.u_id))\n if self.tcp_dst_port==502: #request \n print(\"*****request >>>> *****\")\n print(\"mb.fun_code=\"+str(mb.fun_code))\n self.write_log_object.write_log_txt(\"*****request >>>> *****\")\n self.write_log_object.write_log_txt(\"mb.fun_code=\"+str(mb.fun_code))\n if mb.fun_code==5:\n print(\"mb.reference_number=\"+str(mb.reference_number))\n print(\"mb.modbus_5_data=\"+str(mb.modbus_5_data))\n self.write_log_object.write_log_txt(\"mb.reference_number=\"+str(mb.reference_number))\n self.write_log_object.write_log_txt(\"mb.modbus_5_data=\"+str(mb.modbus_5_data))\n else:\n print(\"mb.reference_number=\"+str(mb.reference_number))\n print(\"mb.Bit_Count=\"+str(mb.Bit_Count))\n print(\"mb.data_lenth=\"+str(mb.data_lenth))\n self.write_log_object.write_log_txt(\"mb.reference_number=\"+str(mb.reference_number))\n self.write_log_object.write_log_txt(\"mb.Bit_Count=\"+str(mb.Bit_Count)) \n self.write_log_object.write_log_txt(\"mb.data_lenth=\"+str(mb.data_lenth))\n elif self.tcp_src_port==502: # response\n print(\"*****response <<<<< *****\")\n print(\"mb.fun_code=\"+str(mb.fun_code))\n self.write_log_object.write_log_txt(\"*****response <<<<< *****\")\n self.write_log_object.write_log_txt(\"mb.fun_code=\"+str(mb.fun_code))\n if mb.fun_code==5:\n print(\"mb.reference_number=\"+str(mb.reference_number))\n print(\"mb.modbus_5_data=\"+str(mb.modbus_5_data))\n self.write_log_object.write_log_txt(\"mb.reference_number=\"+str(mb.reference_number))\n self.write_log_object.write_log_txt(\"mb.modbus_5_data=\"+str(mb.modbus_5_data))\n else:\n print(\"mb.byte_count=\"+str(mb.byte_count))\n print(\"mb.modbus_data=\"+str(mb.modbus_data))\n self.write_log_object.write_log_txt(\"mb.byte_count=\"+str(mb.byte_count))\n 
self.write_log_object.write_log_txt(\"mb.modbus_data=\"+str(mb.modbus_data))\n\n dpid = format(datapath.id, \"d\").zfill(16)\n self.mac_to_port.setdefault(dpid, {})\n\n self.logger.info(\"packet in %s %s %s %s\", dpid, src, dst, in_port)\n\n # learn a mac address to avoid FLOOD next time.\n self.mac_to_port[dpid][src] = in_port\n\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n\n actions = [parser.OFPActionOutput(out_port)]\n\n # install a flow to avoid packet_in next time\n if out_port != ofproto.OFPP_FLOOD:\n match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)\n # verify if we have a valid buffer_id, if yes avoid to send both\n # flow_mod & packet_out\n if msg.buffer_id != ofproto.OFP_NO_BUFFER:\n self.add_flow(datapath, 1, match, actions, msg.buffer_id)\n return\n else:\n self.add_flow(datapath, 1, match, actions)\n data = None\n if msg.buffer_id == ofproto.OFP_NO_BUFFER:\n data = msg.data\n\n out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,\n in_port=in_port, actions=actions, data=data)\n datapath.send_msg(out)\n","repo_name":"ken7428731/ryu","sub_path":"ryu/app/test_simple_switch_13.py","file_name":"test_simple_switch_13.py","file_ext":"py","file_size_in_byte":17022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74988024691","text":"import json\nimport time\nimport logging\nimport re\nimport unittest\nfrom seleniumwire import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.alert import Alert\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import (\n NoAlertPresentException,\n NoSuchElementException,\n InvalidSelectorException,\n TimeoutException,\n)\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\nfrom seleniumwire import handler\nimport urllib.parse\n\n\nclass Tests(unittest.TestCase):\n def readJSONFile(self, filePath):\n with open(filePath, \"r\") as dataFile:\n data = json.load(dataFile)\n return data\n\n def setUp(self):\n options = webdriver.FirefoxOptions()\n profile = webdriver.FirefoxProfile()\n\n options.set_preference(\n \"general.useragent.override\",\n \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Mobile Safari/537.36 Edg/115.0.1901.188\",\n )\n\n self.driver = webdriver.Firefox(options=options, firefox_profile=profile)\n\n self.driver.header_overrides = {\n \"Sec-Ch-Ua\": '\"Not/A)Brand\";v=\"99\", \"Microsoft Edge\";v=\"115\", \"Chromium\";v=\"115\"'\n }\n\n def fetchBook(self):\n driver = self.driver\n\n driver.get(\"https://www.amazon.in/\")\n\n # Read the JSON data from the file\n with open(\n \"/path/to/file.json\",\n \"r\",\n ) as json_file:\n books_data = json.load(json_file)\n\n for book_data in books_data:\n book_name = book_data[\"NAME of BOOKS\"]\n author_name = book_data[\"AUTHOR\"]\n\n search_query = f\"{book_name} by {author_name}\"\n\n input_field = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"nav-search-keywords\"]')\n )\n )\n input_field.clear()\n input_field.send_keys(search_query)\n\n submit_button = WebDriverWait(driver, 5).until(\n EC.element_to_be_clickable(\n (\n By.XPATH,\n \"/html/body/div[1]/header/div[1]/div[4]/form/div[2]/div/input\",\n )\n )\n 
)\n submit_button.click()\n\n try:\n image_src = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located(\n (\n By.XPATH,\n \"//img[@class='s-image']\",\n )\n )\n )\n\n book_link = image_src.get_attribute(\"src\")\n book_data[\"image_url\"] = book_link\n\n\n except TimeoutException:\n book_data[\"image_url\"] = None\n\n with open(\n \"/path/to/file.json\",\n \"w\",\n ) as json_file:\n json.dump(books_data, json_file)\n\n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n suite = unittest.TestSuite()\n suite.addTest(Tests(\"fetchBook\"))\n unittest.TextTestRunner().run(suite)\n\n\n\n# Sample json data\n\n# [\n# {\n# \"Barcode I D\": 1,\n# \"CALL NO\": \"624 PAT\",\n# \"NAME of BOOKS\": \"Elements of Civil Engineering\",\n# \"AUTHOR\": \"Patel J N & Gohil M B\",\n# \"PUBLISHERS\": \"Atul Prakashan, Ahmedabad\",\n# \"YEAR\": 1999,\n# \"Rec Date\": \"11.07.00\",\n# \"BILL NO\": \"1177/Roopal\",\n# \"PRICE\": 150,\n# \"ISBN\": 0,\n# \"BRANCH\": \"Civil\",\n# \"PAGE\": 314,\n# \"Dis\": 30,\n# \"Net Amt.\": 120\n# },\n# {\n# \"Barcode I D\": 2,\n# \"CALL NO\": \"510 SHA\",\n# \"NAME of BOOKS\": \"Engineering Mathematics\",\n# \"AUTHOR\": \"Sharma G S\",\n# \"PUBLISHERS\": \"CBS Publishers\",\n# \"YEAR\": 1999,\n# \"Rec Date\": \"11.07.00\",\n# \"BILL NO\": \"1177/Roopal\",\n# \"PRICE\": 150,\n# \"ISBN\": 0,\n# \"BRANCH\": \"Maths\",\n# \"PAGE\": 90,\n# \"Dis\": 30,\n# \"Net Amt.\": 120\n# }\n# ]\n","repo_name":"hetsonii/Scripts","sub_path":"amazon_image_scaper.py","file_name":"amazon_image_scaper.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24093353922","text":"import pyttsx3\nimport os\nfrom google.cloud import texttospeech\nfrom playsound import playsound\nimport multiprocessing\nimport sox\n\n\ndef main(text):\n synthesis_input = texttospeech.SynthesisInput(text=text)\n\n client = texttospeech.TextToSpeechClient()\n\n voice = texttospeech.VoiceSelectionParams({\n \"name\": 'en-AU-Neural2-B',\n \"language_code\": 'en-AU'\n })\n\n audio_config = texttospeech.AudioConfig({\n \"audio_encoding\": texttospeech.AudioEncoding.LINEAR16,\n \"pitch\": 0,\n \"speaking_rate\": 0.90\n })\n\n response = client.synthesize_speech(\n input=synthesis_input,\n voice=voice,\n audio_config=audio_config\n )\n\n with open('google_tts_output.wav', 'wb') as output:\n output.write(response.audio_content)\n output.close()\n\n tfm = sox.Transformer()\n tfm.flanger(delay=20, phase=100, speed=0.5)\n tfm.reverb(room_scale=90, pre_delay=22, reverberance=25, wet_gain=0.86)\n tfm.flanger(delay=20, phase=100, speed=0.5)\n\n print(\"Processing Audio...\")\n tfm.build_file(os.path.join(os.getcwd(), \"google_tts_output.wav\"),\n os.path.join(os.getcwd(), \"google_tts_output_processed.wav\"))\n\n file_name = os.path.join(os.getcwd(), \"google_tts_output_processed.wav\")\n process = multiprocessing.Process(target=playsound, args=(file_name,))\n process.start()\n process.join()\n process.terminate()\n os.remove(file_name)\n os.remove(os.path.join(os.getcwd(), \"google_tts_output.wav\"))\n\n\ndef speak(text):\n engine = pyttsx3.init()\n voices = engine.getProperty('voices')\n engine.setProperty('voice', voices[0].id)\n engine.setProperty('rate', 180)\n engine.save_to_file(text, \"speech_file.mp3\")\n engine.runAndWait()\n engine.stop()\n\n tfm = sox.Transformer()\n tfm.flanger(delay=20, phase=100, speed=0.5)\n tfm.reverb(room_scale=90, pre_delay=22, reverberance=25, wet_gain=0.86)\n 
tfm.flanger(delay=20, phase=100, speed=0.5)\n\n    print(\"Processing Audio...\")\n    tfm.build_file(os.path.join(os.getcwd(), \"speech_file.mp3\"),\n                   os.path.join(os.getcwd(), \"speech_file_processed.wav\"))\n\n    file_name = os.path.join(os.getcwd(), \"speech_file_processed.wav\")\n    process = multiprocessing.Process(target=playsound, args=(file_name,))\n    process.start()\n    process.join()\n    process.terminate()\n    os.remove(file_name)\n    os.remove(os.path.join(os.getcwd(), \"speech_file.mp3\"))\n\n\nif __name__ == '__main__':\n    main(\"Hello, world.\")\n","repo_name":"zack-seiler/MEMPHIS_AI","sub_path":"texttospeech.py","file_name":"texttospeech.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7959834054","text":"# How many seconds are in 42 minutes 42 seconds?\n\nmins = 42\nsecs = 42\n\n# 60 seconds in 1 minute\n\nt_mins = mins * 60\nt_secs = t_mins + secs\n\nprint(t_secs)\n\n# how many miles are in 10 km, 1.61 km = 1 mile\nmiles = 1.61\nkm = 10\n\nt_miles = km / miles\nprint (t_miles)\n\n# if you run a 10km race in 42mins 42secs, what is your average pace (time per mile in minutes and seconds)? what is your average speed in miles per hour?\n\ndistance = 10\n\n# time\ntim_mins = 42\ntim_secs = 42\n\n# Minutes\nminutes = tim_mins + (tim_secs/60)\nprint (minutes)\n\n# Seconds\nseconds = (tim_mins*60) + tim_secs\nprint (seconds)\n\n# Hour\nhour = minutes/60\nprint(hour)\n\n# Miles per Minute\nmiles_mins = t_miles / minutes\nprint (miles_mins)\n\n# Miles per Second\nmiles_secs = t_miles / seconds\nprint (miles_secs)\n\n# Miles per Hour\nmiles_hour = t_miles / hour\nprint (miles_hour)\n\n# The volume of a sphere with radius r is 4/3 pi r^3; what is the volume of a sphere with radius 5?\npie = 3.142\nr = 5\nvolume = 4/3 * pie * r**3\n\nprint (volume)\n\n# If I leave my house at 6:52am and run 1 mile at an easy pace (8:15 per mile), then 3 miles at tempo (7:12 per mile) and 1 mile at easy pace again, what time do I get home for breakfast?\nstart_time = (6*60 + 52) *60\neasy_pace = (8*60 + 15) *2\ntempo_pace = (7*60 + 12) *3\n\nrun_time = easy_pace + tempo_pace\n\nhome_time = start_time+run_time\n\nbreak_fast_hour = home_time//3600\nbreak_fast_min = (home_time%3600) // 60\nbreak_fast_sec = (home_time%3600) % 60\n\nprint (f\"{break_fast_hour}:{break_fast_min:02d}:{break_fast_sec:02d}am\")\n\n\nt = 'tomisin'\nprint (t)","repo_name":"MajorTomidev/mathsclass","sub_path":"classwork.py","file_name":"classwork.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27505651207","text":"# coding=utf-8\n\n'''\n\nBinary tree depth-first traversal\n\n'''\n\n# https://leetcode-cn.com/problems/balanced-binary-tree/\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def isBalanced(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: bool\n        \"\"\"\n        self.is_balanced = True\n        def dfs(node):\n            if not node:\n                return 0\n            left_height = dfs(node.left)\n            right_height = dfs(node.right)\n            if abs(left_height - right_height) > 1:\n                self.is_balanced = False\n            return max(left_height, right_height) + 1\n        dfs(root)\n        return self.is_balanced
","repo_name":"zhuwenbo1988/nlp","sub_path":"leetcode/easy/tree/110_isBalanced.py","file_name":"110_isBalanced.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"4977823875","text":"from django.urls import path, include\nfrom django.contrib.auth import views as auth_views\n\nfrom .views import (\n    user_create_view,\n    user_login_view,\n    user_logout_view,\n    user_profile_view,\n    user_comments_view,\n    user_favourites_view,\n    user_feedbacks_view,\n    user_results_view,\n)\n\napp_name = 'accounts'\n\nurlpatterns = [\n    path('create/', user_create_view, name='create'),\n    path('login/', user_login_view, name='login'),\n    path('logout/', user_logout_view, name='logout'),\n    path('/', user_profile_view, name='profile'),\n    path('favourites/', user_favourites_view, name='favourites'),\n    path('comments/', user_comments_view, name='comments'),\n    path('feedbacks/', user_feedbacks_view, name='feedbacks'),\n    path('results/', user_results_view, name='results'),\n]","repo_name":"SofiaHorvath91/sk_project","sub_path":"sk_blog/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6591996032","text":"import cv2\nimport numpy as np\nimport os\n# import CreateDataset, UploadToS3,s3_signed_url\n# from s3_signed_url import access_keys\n# from NotifyUser import notify_user\n\n\nface_cascade = cv2.CascadeClassifier(\"/home/aditya/College Project/data/haarcascade_frontalface_alt2.xml\")\ncap = cv2.VideoCapture(0)\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\nrecognizer.read(\"/home/aditya/College Project/trainner.yml\")\n\nid = 0\nfont = cv2.FONT_HERSHEY_SIMPLEX\nidentity = \"Unknown\"\n\nwhile True:\n    ret, frame = cap.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n\n    for (x, y, w, h) in faces:\n        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n        id,conf = recognizer.predict(gray[y:y+h,x:x+w])\n\n        if id == 1:\n            identity = \"Aditya\"\n        else:\n            # reset the label so an unknown face is not tagged with the last match\n            identity = \"Unknown\"\n            # CreateDataset.create_dataset(intruder = True)\n            # bucket_key = UploadToS3.upload_to_s3()\n            # url = s3_signed_url.create_presigned_url(access_keys[\"BUCKET_NAME\"],bucket_key)\n            # notify_user(url = url)\n\n        cv2.putText(frame,identity,(x,y+h),font,0.55,(0,255,0),1)\n        cv2.imshow(\"frame\", frame)\n\n    key = cv2.waitKey(1)\n    if key == ord(\"q\"):\n        break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"aditya-rawat-99/College-Project","sub_path":"jnajna.py","file_name":"jnajna.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14854489833","text":"from transformers import pipeline, PipelineException\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport re\n\n# download nltk resources\nnltk.download('popular', quiet=True)\n\n# load NLP pipeline\ntry:\n    nlp = pipeline(\"zero-shot-classification\")\nexcept PipelineException:\n    print('Unable to load NLP pipeline.')\n    exit()\n\n# define sectors and keywords\nsectors = ['Automobile', 'Banking', 'Cement', 'Consumer Durables', 'Energy', 'FMCG', 'Healthcare', 'IT', 'Media & Entertainment', 'Metal', 'Oil & Gas', 'Pharmaceuticals', 'Realty', 'Services', 'Telecom', 'Textiles']\n\nkeywords = {\n    'Automobile': 'car, vehicle, auto, tyre, 
motor',\n 'Banking': 'bank, loan, interest rate, credit, debit',\n 'Cement': 'cement, concrete, construction, infrastructure, building',\n 'Consumer Durables': 'electronics, home appliance, furniture, household goods, kitchenware',\n 'Energy': 'oil, gas, renewable, electricity, power',\n 'FMCG': 'food, beverage, personal care, home care, cleaning',\n 'Healthcare': 'pharmaceutical, medical device, hospital, clinic, wellness',\n 'IT': 'software, hardware, computer, internet, technology',\n 'Media & Entertainment': 'movies, music, TV, radio, gaming',\n 'Metal': 'steel, aluminium, copper, iron, metal fabrication',\n 'Oil & Gas': 'oil, gas, refinery, exploration, production',\n 'Pharmaceuticals': 'drug, medicine, vaccine, research, development',\n 'Realty': 'real estate, property, construction, housing, apartment',\n 'Services': 'transportation, logistics, hospitality, consulting, education',\n 'Telecom': 'telecommunication, mobile, broadband, satellite, network',\n 'Textiles': 'clothing, fabrics, apparel, fashion, yarn'\n}\n\n# preprocess text\ndef preprocess(text):\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(text.lower())\n filtered_words = [word for word in word_tokens if word not in stop_words]\n return ' '.join(filtered_words)\n\n# predict sector based on text input\ndef predict_sector(text):\n try:\n text = preprocess(text)\n scores = nlp(text, sectors, multi_label=True)\n max_score = 0\n predicted_sector = None\n for i in range(len(sectors)):\n score = scores['scores'][i]\n if score > max_score:\n max_score = score\n predicted_sector = sectors[i]\n return predicted_sector\n except (PipelineException, ValueError):\n print('Unable to predict sector.')\n exit()\n\n# check for potential SQL injection attack\ndef check_sql_injection(text):\n sql_injection_regex = re.compile(r'\\'\\s*or\\s*\\'|\\\"\\s*or\\s*\\\"|\\s*;|\\s*--')\n if sql_injection_regex.search(text):\n print('Potential SQL injection attack detected.')\n exit()\n\n# check for potential XSS attack\ndef check_xss(text):\n xss_regex = re.compile(r' List[str]:\n results=[]\n d = {'2':\"abc\", '3':\"def\", '4':\"ghi\", '5':\"jkl\", '6':\"mno\", '7':\"pqrs\", '8':\"tuv\", '9':\"wxyz\"}\n def dfs(index:int, s:string):\n if len(s) == len(digits):\n results.append(s)\n return\n\n for c in d[digits[i]]:\n dfs(i+1, s+c)\n\n return\n\n if not digits:\n return []\n\n dfs(0, \"\")\n return results\n","repo_name":"bhyunki/leetcode","sub_path":"srcs/python3/17_letter_combinations_of_a_phone_number.py","file_name":"17_letter_combinations_of_a_phone_number.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4485718052","text":"import math\nimport warnings\nfrom typing import List, Tuple\nimport sys\n\nfrom .sentence import Sentence\n\nclass BLEU:\n \"\"\"\n BLEU class takes multiple reference statements and construct a class to check and\n to compute scores for multiple different translated statements. Currently, only\n supports till 4-gram.\n \n Attributes\n ----------\n refs: List[Sentence]\n list of reference sentences\n weights: Tuple[float, float, float, float]\n tuple of weights, ith value represents weight for i-gram\n suppress_warnings: bool\n Whenever a specified n-gram's overlaps are not found, score turns to zero\n and generates a warning. 
This can be suppressed if this is set to True.\n\n    Example\n    -------\n    ref1 = ['love', 'can', 'always', 'find', 'a', 'way']\n    ref2 = ['love', 'makes', 'anything', 'possible']\n    tran1 = ['the', 'love', 'can', 'always', 'do']\n    tran2 = ['love', 'can', 'make', 'anything', 'possible']\n    b = BLEU([ref1, ref2], (0.4, 0.35, 0.25, 0, ))\n    score1 = b.compute_score(tran1)\n    score2 = b.compute_score(tran2)\n    \"\"\"\n    \n    default_wt = (0.25, 0.25, 0.25, 0.25,)\n    __MAX_NGRAM = 4\n    __EPSILON = 1e-6\n    \n    def __init__(self, refs: List[List[str]], weights: Tuple[float, float, float, float] = default_wt,\n                 suppress_warnings: bool = False):\n        \"\"\"\n        :param refs: List of list of tokens of reference translations.\n        :param weights: tuple of weights, ith value represents weight for i-gram. Sum of weights must be 1.\n                        Default: (0.25, 0.25, 0.25, 0.25,)\n        :param suppress_warnings: Whenever a specified n-gram's overlaps are not found, score turns to zero\n                                  and generates a warning. This can be suppressed if this is set to True.\n        \"\"\"\n        assert len(refs) >= 1, \"Must pass at least one reference sentence\"\n        assert abs(sum(weights) - 1) < BLEU.__EPSILON, \"All weights should sum to 1\"\n        self.ngrams = tuple((i+1) for i in range(BLEU.__MAX_NGRAM) if not (weights[i] < BLEU.__EPSILON))\n        self.weights = (0.0,) + weights # for n-gram consistent indices, wt[1] is for 1-gram, etc.\n        self.refs = [Sentence(ref, self.ngrams) for ref in refs]\n        self.suppress_warnings = suppress_warnings\n    \n    def find_closest_ref(self, src: Sentence):\n        min_dist = abs(len(src) - len(self.refs[0]))\n        closest_ref = self.refs[0]\n        \n        for ref in self.refs:\n            curr_dist = abs(len(src) - len(ref))\n            \n            if curr_dist < min_dist:\n                min_dist = curr_dist\n                closest_ref = ref\n            elif curr_dist == min_dist and len(closest_ref) > len(ref):\n                closest_ref = ref\n        \n        return closest_ref\n    \n    def compute_precision(self, source: Sentence) -> List[float]:\n        \"\"\"\n        Computes modified precision. Optimizes by only computing it for useful n-grams,\n        that is, n-grams with significant weights.\n        :param source: source sentence\n        :return: list of precision scores with nth index corresponding to n-gram, provided it is useful, else sys.float_info.min.\n        \"\"\"\n        precision = [sys.float_info.min] * (BLEU.__MAX_NGRAM + 1)\n        \n        # computing modified n-gram precision values\n        for n in self.ngrams: # iterating over n-grams\n            denom = sum(source.counters[n].values())\n            \n            numer = sum(\n                min(max(r_i.counters[n][gram] for r_i in self.refs), source.counters[n][gram]) for gram in\n                source.counters[n].keys())\n            if numer == 0:\n                numer = sys.float_info.min\n                if not self.suppress_warnings:\n                    warnings.warn(f\"No {n}-gram overlaps found. 
No contribution towards score.\")\n \n precision[n] = numer / denom\n \n return precision\n \n def compute_score(self, c: List[str]) -> float:\n \"\"\"\n Computes BLEU score for translated sentence c with respect\n to the reference sentences and specified weight.\n :param c: List of tokens of translated sentence.\n :return: BLEU score for the translated sentence.\n \"\"\"\n \n source = Sentence(c, self.ngrams)\n precision = self.compute_precision(source)\n \n # finding closest reference\n len_closest_ref = len(self.find_closest_ref(source))\n \n brevity_penalty = 1 if len(source) >= len_closest_ref else math.exp(1 - len_closest_ref / len(source))\n \n return brevity_penalty * math.exp(sum(self.weights[i] * math.log(p_i)\n for (i, p_i) in enumerate(precision[1:], start=1)))\n\n\nif __name__ == '__main__':\n refs = [\"love can always find a way\".split(), \"love makes anything possible\".split()]\n c1 = \"the love can always do\".split()\n c2 = \"love can make anything possible\".split()\n bleu = BLEU(refs, weights=(0.5, 0.5, 0, 0,))\n print(bleu.compute_score(c1))\n print(bleu.compute_score(c2))\n","repo_name":"shvms/bleuper","sub_path":"bleuper/bleu.py","file_name":"bleu.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"14472746384","text":"import pynini\nfrom nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path\nfrom nemo_text_processing.text_normalization.en.graph_utils import (\n NEMO_DIGIT,\n NEMO_NOT_SPACE,\n NEMO_SIGMA,\n GraphFst,\n convert_space,\n delete_extra_space,\n delete_space,\n get_singulars,\n insert_space,\n)\n\nfrom en.decimal import Decimal\nfrom pynini.lib import pynutil\n\n\nclass Money:\n \"\"\"\n Finite state transducer for classifying money\n e.g. 
twelve dollars and five cents -> money { integer_part: \"12\" fractional_part: 05 currency: \"$\" }\n\n Args:\n cardinal: CardinalFst\n decimal: DecimalFst\n \"\"\"\n\n def __init__(self):\n # quantity, integer_part, fractional_part, currency\n digit_to_str = (\n pynini.invert(\n pynini.string_file(get_abs_path(\"data/numbers/digit.tsv\")).optimize()\n )\n | pynini.cross(\"0\", pynini.union(\"o\", \"oh\", \"zero\")).optimize()\n )\n\n str_to_digit = pynini.invert(digit_to_str)\n\n decimal = Decimal().fst\n\n # dollar standalone\n graph_dollar_standalone = (\n pynutil.insert(\"$\")\n + decimal\n + delete_space\n + (pynutil.delete(\"dollars\", -0.002) | pynutil.delete(\"dollar\", -0.001))\n )\n # cents\n graph_cent = pynutil.add_weight(\n pynutil.insert(\".0\")\n + str_to_digit\n + delete_space\n + (pynutil.delete(\"cents\", -0.002) | pynutil.delete(\"cent\", -0.001)),\n -0.005,\n ) | (\n pynutil.insert(\".\")\n + decimal\n + delete_space\n + (pynutil.delete(\"cents\", -0.002) | pynutil.delete(\"cent\", -0.001))\n )\n\n # cents standalone\n graph_cent_standalone = pynutil.add_weight(\n pynutil.insert(\"$0\") + graph_cent, +0.005\n )\n\n graph_dollar_plus_cent = graph_dollar_standalone | pynutil.add_weight(\n graph_dollar_standalone | pynutil.delete(\" and \") + graph_cent, -0.002\n )\n\n self.fst = graph_dollar_plus_cent | graph_cent_standalone.optimize()\n","repo_name":"nvidia-riva/speech-hints","sub_path":"en/money.py","file_name":"money.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9204370616","text":"import pandas as pd\n\nfrom db.interface import open_connection, close_connection\nfrom db.player_table import get_latest_by_fifa_id\nfrom db.player_identity_table import insert\nfrom logger import logging\n\ndef main(conn):\n fifa_ids = pd.read_csv(\"fifa_ids.csv\")[\"fifa_id\"].values\n logging.info(f\"Inserting {len(fifa_ids)} players\")\n for fifa_id in fifa_ids:\n player_obj = {}\n player_df = get_latest_by_fifa_id(fifa_id, conn=conn)\n if player_df.shape[0] == 0:\n logging.warning(f\"No player found for fifa id {fifa_id}\")\n\n player_obj[\"fifa_name\"] = player_df[\"name\"].item()\n player_obj[\"fifa_id\"] = fifa_id\n insert(conn, **player_obj)\n conn.commit()\n\nif __name__ == '__main__':\n db_conn = open_connection()\n main(db_conn)\n close_connection(db_conn)\n","repo_name":"Villux/golden_goal","sub_path":"create_player_objects.py","file_name":"create_player_objects.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30632361943","text":"from misereNim import misereNim\n\nif __name__ == '__main__':\n \n entrada = int(input().strip())\n \n for i_tr in range(entrada):\n \n n = int(input().strip())\n \n s = list(map(int, input().rstrip().split()))\n \n resultado = misereNim(s)\n \n print(resultado + '\\n')","repo_name":"diegotpereira/Desafio-hackerrank-python","sub_path":"Algorithms/game-theory/misere-nim-1/tests/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70968897652","text":"print('GERADOR DE P.A')\ntermo = primeiro = int(input('Digite o primeiro termo da sua PA: '))\nrazao = int(input('Digite a razão de sua PA: '))\ncontar = 1\ntotal = 0\nmais = 10\nwhile mais != 0:\n total += mais\n while contar < total:\n 
print(\"{}\".format(termo),end='-')\n termo+=razao\n contar+=1\n mais = int(input('\\nQuantos termos você quer mostrar a mais ?'))\nprint('Progressão finalizada com {} termos mostrados.'.format(total),end=' ')\n\n","repo_name":"denisesakiyo/expython","sub_path":"exercicio62.py","file_name":"exercicio62.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20462512974","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sep 20, 2011\n\n@author: alex\n\nThis is a simple script to start schAnneal with $WORK_FOLDER/xlsConfig.xls for $X times, where $WORK_FOLDER is the first argument\nand $X is the second argument in seconds. This script should evolve to be able to have the type of config and the type of optimizer\nas an argument\n\n\"\"\"\nimport xlsConfig\nfrom annealOpt.schAnneal import SchAnneal\nfrom os import path\nimport sys\n\nimport numpy as np\nnp.seterr(over='ignore')\n\n\ndef makeScheduleLeague():\n workFolder = 'example_configs'\n maxTime = 20 * 60 # in seconds\n\n if len(sys.argv) > 1:\n workFolder = sys.argv[1]\n sys.stdout = open(path.join(workFolder, 'stdout'), 'w')\n sys.stderr = open(path.join(workFolder, 'stderr'), 'w')\n\n if len(sys.argv) > 2:\n maxTime = float(sys.argv[2]) * 60\n\n config = xlsConfig.ConfigLoader(path.join(workFolder, 'xlsConfig.xls')).getConfig()\n config.workFolder = workFolder\n _matchL = SchAnneal(maxTime=maxTime,verbosity=1).opt(config)\n#\n# htmlAnalysis = HtmlDoc()\n# htmlAnalysis.add( HtmlAnalysis( config, matchL) )\n# htmlAnalysis.write('example/analysis.html')\n\n# doc = HtmlDoc()\n# doc.add( HtmlSchedule(matchL) )\n# doc.write('example/schedule.html')\n\n\nif __name__ == \"__main__\":\n makeScheduleLeague()\n","repo_name":"jeanfrancisroy/happy-league","sub_path":"makeSchedule.py","file_name":"makeSchedule.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24277723839","text":"import cv2\r\n\r\n### Descição ###\r\n# Este código detecta o dicionário aruco em uma imagem utilizado para dar origem \r\n# ao aruco. 
Ele está baseado no seguinte tutorial:\r\n# https://pyimagesearch.com/2020/12/28/determining-aruco-marker-type-with-opencv-and-python/\r\n\r\n\r\n# define names of each possible ArUco tag OpenCV supports\r\nARUCO_DICT = {\r\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\r\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\r\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\r\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\r\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\r\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\r\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\r\n\t\"DICT_5X5_1000\": cv2.aruco.DICT_5X5_1000,\r\n\t\"DICT_6X6_50\": cv2.aruco.DICT_6X6_50,\r\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\r\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\r\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\r\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\r\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\r\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\r\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\r\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\r\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\r\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\r\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\r\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\r\n}\r\n\r\n# resize the image\r\ndef resize(image, width=None, height=None, inter=cv2.INTER_AREA):\r\n # initialize the dimensions of the image to be resized and\r\n # grab the image size\r\n dim = None\r\n (h, w) = image.shape[:2]\r\n\r\n # if both the width and height are None, then return the\r\n # original image\r\n if width is None and height is None:\r\n return image\r\n\r\n # check to see if the width is None\r\n if width is None:\r\n # calculate the ratio of the height and construct the\r\n # dimensions\r\n r = height / float(h)\r\n dim = (int(w * r), height)\r\n\r\n # otherwise, the height is None\r\n else:\r\n # calculate the ratio of the width and construct the\r\n # dimensions\r\n r = width / float(w)\r\n dim = (width, int(h * r))\r\n\r\n # resize the image\r\n resized = cv2.resize(image, dim, interpolation=inter)\r\n\r\n # return the resized image\r\n return resized\r\n\r\n# load the input image from disk and resize it\r\nprint(\"[INFO] loading image...\")\r\nimage = cv2.imread(\"aruco.jpeg\")\r\n\r\nimage = resize(image, width=600)\r\n\r\n# loop over the types of ArUco dictionaries\r\nfor (arucoName, arucoDict) in ARUCO_DICT.items():\r\n\t# load the ArUCo dictionary, grab the ArUCo parameters, and\r\n\t# attempt to detect the markers for the current dictionary\r\n\tarucoDict = cv2.aruco.Dictionary_get(arucoDict)\r\n\tarucoParams = cv2.aruco.DetectorParameters_create()\r\n\t(corners, ids, rejected) = cv2.aruco.detectMarkers(\r\n\t\timage, arucoDict, parameters=arucoParams)\r\n\t# if at least one ArUco marker was detected display the ArUco\r\n\t# name to our terminal\r\n\tif len(corners) > 0:\r\n\t\tprint(\"[INFO] detected {} markers for '{}'\".format(\r\n\t\t\tlen(corners), arucoName))","repo_name":"rafaelprr6/calibracao","sub_path":"deteccao_dicionario_aruco.py","file_name":"deteccao_dicionario_aruco.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1883659360","text":"from django.contrib import admin\nfrom .models import Page\n\n# Register your models here.\nclass PageAdmin(admin.ModelAdmin):\n list_display = ['title', 'slug']\n list_display_links = ('title',)\n\n prepopulated_fields = {'slug': 
('title',)}\n\n\nadmin.site.register(Page, PageAdmin)","repo_name":"odedahay/dj-eobsystem-jr","sub_path":"pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36800002103","text":"import numpy as np\n\na = np.array([1, 1, 1])\nb = np.array([2, 2, 2])\n\n# 矩阵合并\nc = np.vstack((a, b)) # 上下合并\nprint(c)\n\ne = a.reshape((3, 1)) # 将 [1 1 1] 变成3行一列的矩阵\nf = b.reshape((3, 1))\n\nd = np.hstack((e, f)) # 左右合并\nprint(d)","repo_name":"diaoyuqiang/python","sub_path":"数据处理/矩阵合并.py","file_name":"矩阵合并.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43937365480","text":"import os\nimport sys\nimport pygtk\npygtk.require('2.0')\nimport gtk\nimport gobject\n\nimport okegtk\nimport Notification\nimport TrayIcon\n\ndef gtk_main(Control):\n def setConError(error):\n disconnect(error=error)\n\n def setError(error):\n dialog = gtk.Dialog('Error', MainWindow)\n dialogLabel = gtk.Label(error)\n dialogLabel.set_property('wrap', True)\n dialogLabel.show()\n dialog.vbox.pack_start(dialogLabel)\n dialog.run()\n \n def setInbox(inbox):\n Control['MainWindow'].set_inbox(inbox)\n\n def setPensamiento(pensamientos):\n Control['MainWindow'].set_pen(pensamientos)\n\n def setOutbox(outbox):\n Control['MainWindow'].set_outbox(outbox)\n\n def setFavorito(favbox):\n Control['MainWindow'].set_fav(favbox)\n\n def newInbox(mensajes):\n Control['MainWindow'].new_inbox(mensajes)\n Control['Sound'].recibido()\n #self.__MainWindow.blink() #TODO\n men = list(mensajes)\n men.reverse()\n for m in men:\n openMessage = lambda *x, **y: MainWindow.openMessage(m[3])\n Control['Notification'].mensajeNew(m[0], openMessage, None, m[4], m[2])\n \n def newPensamiento(pensamientos):\n Control['MainWindow'].new_pen(pensamientos)\n Control['Sound'].pensamiento()\n pen = list(pensamientos)\n pen.reverse()\n for p in pen:\n Control['Notification'].pensamientoNew(p[0], None, None, p[4], p[2])\n\n def newOutbox(outbox):\n Control['MainWindow'].new_outbox(outbox)\n\n def redrawDone(*args):\n #Control['ActMen'].thStart()\n Control['ThreadHandler'].createActMen()\n Control['ThreadHandler'].startActMen()\n # moved to okegtk.\n #Control['Config'].setCurrentUser(Control['Okeyko'].getUser())\n #Control['Config'].readUserConfig()\n Control['Sound'].update()\n Control['Notification'].updateConfig()\n Tray.reBuildMenu()\n \n def disconnect(*args, **kargs):\n if kargs.has_key('error'):\n MainWindow.disconnect(error=kargs['error'])\n else:\n MainWindow.disconnect()\n \n \n def redrawDisconnect(*args):\n Control['Sound'].clearUpdate()\n Control['Okeyko'].disconnect()\n #Control['ActMen'].thStop()\n Control['ThreadHandler'].killActMen()\n Tray.buildMenu()\n\n def quit(*args):\n for window in gtk.window_list_toplevels(): #Hides all windows. 
\n try:\n window.saveMainWindowGeometry()\n except:\n pass\n window.hide()\n if not Tray.disabled:\n Tray.remove()\n gtk.main_quit()\n if Control['Okeyko'].conectado()[0]:\n Control['Config'].writeUserConfig()\n Control['Okeyko'].disconnect()\n Control['Config'].writeGlobalConfig()\n sys.exit(0)\n \n \n if os.name != 'nt':\n gtk.gdk.threads_init()\n\n OKC_FOKY = 'okc-foky'\n gtk.stock_add(((OKC_FOKY, '_FOKY', gtk.gdk.CONTROL_MASK, gtk.gdk.keyval_from_name('P'), 'FOKY'),))\n pixbufFoky = gtk.gdk.pixbuf_new_from_file(Control['Config'].pathFile('theme-foky.png'))\n iconSetFoky = gtk.IconSet(pixbufFoky)\n iconFact = gtk.IconFactory()\n iconFact.add(OKC_FOKY, iconSetFoky)\n iconFact.add_default()\n Control.update({'Quit': quit})\n Notificaciones = Notification.MainClass(Control)\n Control.update({'Notification': Notificaciones})\n MainWindow = okegtk.mainWindow(Control)\n Control.update({'MainWindow': MainWindow})\n #Tray = TrayIcon.TrayIcon(MainWindow)\n Tray = TrayIcon.TrayIcon(Control) \n #Control['ActMen'].setgui(MainWindow, Notificaciones)\n #Control['ThreadHandler'].setgui(MainWindow, Notificaciones)\n #MainWindow.connect('redraw-done', Control['ActMen'].thStart)\n MainWindow.connect('redraw-done', redrawDone)\n MainWindow.connect('redraw-disconnect', redrawDisconnect)\n \n Control['ThreadHandler'].connect('setConError', setConError)\n Control['ThreadHandler'].connect('setError', setError)\n Control['ThreadHandler'].ActMenConnect('setInbox', setInbox)\n Control['ThreadHandler'].ActMenConnect('setPensamiento', setPensamiento)\n Control['ThreadHandler'].ActMenConnect('setOutbox', setOutbox)\n Control['ThreadHandler'].ActMenConnect('setFavorito', setFavorito)\n Control['ThreadHandler'].ActMenConnect('newInbox', newInbox)\n Control['ThreadHandler'].ActMenConnect('newPensamiento', newPensamiento)\n Control['ThreadHandler'].ActMenConnect('newOutbox', newOutbox)\n \n gobject.timeout_add(500, Control['queueManager'], Control['queueToGui'])\n gtk.main()\n\n","repo_name":"NickCis/Okeykoclient","sub_path":"okeykoclient/gui/gtkui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41086465997","text":"from numpy import *\nimport matplotlib.pyplot as plt\n\nFILE_PATH = './res/testSet.txt'\n\nx_cord0 = []\ny_cord0 = []\nx_cord1 = []\ny_cord1 = []\nmarkers = []\ncolors = []\nfr = open(FILE_PATH)\nfor line in fr.readlines():\n lineSplit = line.strip().split('\\t')\n xPt = float(lineSplit[0])\n yPt = float(lineSplit[1])\n label = int(lineSplit[2])\n if label == 0:\n x_cord0.append(xPt)\n y_cord0.append(yPt)\n else:\n x_cord1.append(xPt)\n y_cord1.append(yPt)\n\nfr.close()\nfig = plt.figure()\nax = fig.add_subplot(221)\nx_cord0 = []\ny_cord0 = []\nx_cord1 = []\ny_cord1 = []\nfor i in range(300):\n [x, y] = random.uniform(0, 1, 2)\n if ((x > 0.5) and (y < 0.5)) or ((x < 0.5) and (y > 0.5)):\n x_cord0.append(x)\n y_cord0.append(y)\n else:\n x_cord1.append(x)\n y_cord1.append(y)\nax.scatter(x_cord0, y_cord0, marker='.', s=90)\nax.scatter(x_cord1, y_cord1, marker='*', s=50, c='red')\nplt.title('A')\nax = fig.add_subplot(222)\nx_cord0 = random.standard_normal(150)\ny_cord0 = random.standard_normal(150)\nx_cord1 = random.standard_normal(150) + 2.0\ny_cord1 = random.standard_normal(150) + 2.0\nax.scatter(x_cord0, y_cord0, marker='.', s=90)\nax.scatter(x_cord1, y_cord1, marker='*', s=50, c='red')\nplt.title('B')\nax = fig.add_subplot(223)\nx_cord0 = []\ny_cord0 = []\nx_cord1 = 
[]\ny_cord1 = []\nfor i in range(300):\n [x, y] = random.uniform(0, 1, 2)\n if x > 0.5:\n x_cord0.append(x * cos(2.0 * pi * y))\n y_cord0.append(x * sin(2.0 * pi * y))\n else:\n x_cord1.append(x * cos(2.0 * pi * y))\n y_cord1.append(x * sin(2.0 * pi * y))\nax.scatter(x_cord0, y_cord0, marker='.', s=90)\nax.scatter(x_cord1, y_cord1, marker='*', s=50, c='red')\nplt.title('C')\nax = fig.add_subplot(224)\n\nx_cord1 = zeros(150)\ny_cord1 = zeros(150)\nx_cord0 = random.uniform(-3, 3, 350)\ny_cord0 = random.uniform(-3, 3, 350)\nx_cord1[0:50] = 0.3 * random.standard_normal(50) + 2.0\ny_cord1[0:50] = 0.3 * random.standard_normal(50) + 2.0\n\nx_cord1[50:100] = 0.3 * random.standard_normal(50) - 2.0\ny_cord1[50:100] = 0.3 * random.standard_normal(50) - 3.0\n\nx_cord1[100:150] = 0.3 * random.standard_normal(50) + 1.0\ny_cord1[100:150] = 0.3 * random.standard_normal(50)\n\nax.scatter(x_cord0, y_cord0, marker='.', s=90)\nax.scatter(x_cord1, y_cord1, marker='*', s=50, c='red')\nplt.title('D')\nplt.show()","repo_name":"HanGaaaaa/MLAProject","sub_path":"SVM/notLinSeperable.py","file_name":"notLinSeperable.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12467975457","text":"import pymongo \nimport pandas as pd \nimport json\nfrom config import mongo_client\n\n# VARIABLE NAMES\nDATA_FILE_PATH = \"/config/workspace/aps_failure_training_set1.csv\"\nDATABASE_NAME = \"sensor_fault_database\"\nCOLLECTION_NAME = \"sensor_fault_collection\"\n\nif __name__ == '__main__':\n df = pd.read_csv(DATA_FILE_PATH)\n print(df.shape)\n\n # Convert dataframe to json to dump data into mongodb\n df.reset_index(drop=True, inplace=True)\n json_records = list(json.loads(df.T.to_json()).values())\n # print(json_records[0])\n mongo_client[DATABASE_NAME][COLLECTION_NAME].insert_many(json_records)\n print(\"Data Inserted into MongoDB successfully!\")","repo_name":"Parvez13/Placement_Assignment-Sohail_Parvez-","sub_path":"Pre_Placement/ML_Assignment/Day_6_Assignment/Question_1/A/Data_ingestion_with_api/data_dump.py","file_name":"data_dump.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13731377083","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on: \t[2017-05-09]\r\n@author: \t\tAntony Smith\r\n@description: \tOpens image at predefined location counts faces\r\n\t\t\t\tIf file doesn't exist, creates file and appends\r\n\t\t\t\timage directory/name.jpg and number of faces counted\r\n\r\nRun:\t\t\tpython GPS_Main.py\r\n\"\"\"\r\nimport sys\r\nimport GPS_Class2 as GPS_Class\r\n\r\ndef main():\r\n\ttry:\r\n\t\tGPS = GPS_Class.GPSClass()\t\t\t\t# Create Instance of the class\r\n\t\tGPS.message(\"Message!\")\r\n\t\tGPS.Get_GPS()\r\n\t\t\r\n\t# Ctrl+C will exit the program correctly\r\n\texcept KeyboardInterrupt:\r\n\t\t#GPIO.cleanup()\t\t\t\t\t\t\t\t# Only if GPIO's were used\r\n\t\tser.close()\t\t\t\t\t\t\t\t\t# close the serial connection\r\n\t\tprint(\"\\r\\nEXIT PROGRAM!!\")\r\n\t\tsys.exit(0)\r\n \r\nif __name__ == \"__main__\": main()\r\n","repo_name":"ANTZ314/raspi","sub_path":"python/GPS/GPS_Main.py","file_name":"GPS_Main.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1180824311","text":"# Complete this class for all parts of the project\n\nfrom pacman_module.game import Agent\nimport numpy as np\nfrom 
pacman_module import util\nfrom scipy.stats import binom\n\n\nclass BeliefStateAgent(Agent):\n def __init__(self, args):\n \"\"\"\n Arguments:\n ----------\n - `args`: Namespace of arguments from command-line prompt.\n \"\"\"\n self.args = args\n \"\"\"\n Variables to use in 'update_belief_state' method.\n Initialization occurs in 'get_action' method.\n \"\"\"\n # Current list of belief states over ghost positions\n self.beliefGhostStates = None\n\n # Grid of walls (assigned with 'state.getWalls()' method)\n self.walls = None\n\n # Hyper-parameters\n self.ghost_type = self.args.ghostagent\n self.sensor_variance = self.args.sensorvariance\n\n self.p = 0.5\n self.n = int(self.sensor_variance/(self.p*(1-self.p)))\n\n def nb0fGhostLegalMoves(self, i, j):\n \n ghostlegalmoves = 0\n\n if(not self.walls[i-1][j]):\n ghostlegalmoves+=1\n if(not self.walls[i+1][j]): \n ghostlegalmoves+=1\n if(not self.walls[i][j+1]):\n ghostlegalmoves+=1\n if(not self.walls[i][j-1]):\n ghostlegalmoves+=1\n\n return ghostlegalmoves\n\n def transition_model(self, x_now, y_now, x_next, y_next, pacman_position):\n \n dist = util.manhattanDistance((x_now, y_now), (x_next, y_next))\n if dist != 1:\n return 0\n\n if self.walls[x_now][y_now] or self.walls[x_next][y_next]:\n return 0\n\n if self.ghost_type == \"confused\":\n \n nb_of_moves = self.nb0fGhostLegalMoves(x_now, y_now)\n proba=1/nb_of_moves\n return proba\n\n if self.ghost_type == \"afraid\":\n \n distrib = util.Counter()\n CellPosition=(x_now,y_now)\n CellDistance=util.manhattanDistance(pacman_position,CellPosition)\n if(not self.walls[x_now-1][y_now]):\n LeftDistance=util.manhattanDistance(pacman_position,(x_now-1,y_now)) \n if(CellDistance<=LeftDistance):\n distrib[(x_now -1 , y_now)]=2\n else:\n distrib[(x_now -1 , y_now)]=1\n \n if(not self.walls[x_now+1][y_now]):\n RightDistance=util.manhattanDistance(pacman_position,(x_now+1,y_now))\n if(CellDistance<=RightDistance):\n distrib[(x_now + 1 , y_now)]=2\n else: \n distrib[(x_now + 1 , y_now)]=1\n \n if(not self.walls[x_now][y_now-1]):\n DownDistance = util.manhattanDistance(pacman_position,(x_now,y_now-1))\n if(CellDistance<=DownDistance):\n distrib[(x_now , y_now - 1)]=2\n else:\n distrib[(x_now , y_now - 1)]=1\n \n if(not self.walls[x_now][y_now+1]):\n UpDistance=util.manhattanDistance(pacman_position,(x_now,y_now+1))\n if(CellDistance<=UpDistance):\n distrib[(x_now , y_now + 1)]=2\n else: \n distrib[(x_now , y_now + 1)]=1\n\n distrib.normalize()\n return distrib[(x_next, y_next)]\n\n if self.ghost_type == \"scared\":\n\n distrib = util.Counter()\n CellPosition=(x_now,y_now)\n CellDistance=util.manhattanDistance(pacman_position,CellPosition)\n if(not self.walls[x_now-1][y_now]):\n LeftDistance=util.manhattanDistance(pacman_position,(x_now-1,y_now)) \n if(CellDistance<=LeftDistance):\n distrib[(x_now - 1 , y_now )]=8\n else:\n distrib[(x_now - 1 , y_now )]=1\n if(not self.walls[x_now+1][y_now]):\n RightDistance=util.manhattanDistance(pacman_position,(x_now+1,y_now))\n if(CellDistance<=RightDistance):\n distrib[(x_now + 1 , y_now )]=8\n else:\n distrib[(x_now + 1 , y_now )]=1\n if(not self.walls[x_now][y_now-1]):\n DownDistance = util.manhattanDistance(pacman_position,(x_now,y_now-1))\n if(CellDistance<=DownDistance):\n distrib[(x_now , y_now - 1)]=8\n else:\n distrib[(x_now , y_now - 1)]=1\n if(not self.walls[x_now][y_now+1]):\n UpDistance=util.manhattanDistance(pacman_position,(x_now,y_now+1))\n if(CellDistance<=UpDistance):\n distrib[(x_now , y_now + 1)]=8\n else:\n distrib[(x_now , y_now + 1)]=1\n\n 
distrib.normalize()\n \n return distrib[(x_next, y_next)]\n\n\n\n def sensor_model(self, evidence, ghost_position, pacman_position):\n \n distance=util.manhattanDistance(pacman_position,ghost_position)\n return binom.pmf(evidence + self.n*self.p, self.n, self.p, loc=distance)\n\n\n\n def update_belief_state(self, evidences, pacman_position, ghosts_eaten):\n \n beliefStates = self.beliefGhostStates\n\n # XXX: Your code here\n \n \n\n N = self.walls.width\n M = self.walls.height\n\n \n \n for index in range(0, len(evidences)):\n mat = np.zeros((N,M))\n #mat is the \"Total transition matrix\"\n for i in range(0,N):\n for j in range(0, M): \n for n in range(0, N):\n for m in range(0, M):\n mat[i][j] += self.transition_model(n,m, i, j, pacman_position)*self.beliefGhostStates[index][n][m]\n \n\n for j in range(0, M):\n for i in range(0, N):\n beliefStates[index][i][j] = self.sensor_model(evidences[index], (i,j), pacman_position)*mat[i][j]\n alpha=beliefStates[index].sum()\n for j in range(0,M):\n for i in range(0,N):\n if(alpha!=0):\n beliefStates[index][i][j]=beliefStates[index][i][j]/alpha\n \n\n # XXX: End of your code\n\n self.beliefGhostStates = beliefStates\n\n return beliefStates\n\n def _get_evidence(self, state):\n \"\"\"\n Computes noisy distances between pacman and ghosts.\n\n Arguments:\n ----------\n - `state`: The current game state s_t\n where 't' is the current time step.\n See FAQ and class `pacman.GameState`.\n\n\n Return:\n -------\n - A list of Z noised distances in real numbers\n where Z is the number of ghosts.\n\n XXX: DO NOT MODIFY THIS FUNCTION !!!\n Doing so will result in a 0 grade.\n \"\"\"\n\n positions = state.getGhostPositions()\n pacman_position = state.getPacmanPosition()\n noisy_distances = []\n\n for pos in positions:\n true_distance = util.manhattanDistance(pos, pacman_position)\n rvs = binom.rvs(self.n, self.p)\n espérance = self.n*self.p\n noise = rvs - espérance\n noisy_distances.append(true_distance + noise)\n\n return noisy_distances\n\n def _record_metrics(self, belief_states, state):\n \"\"\"\n Use this function to record your metrics\n related to true and belief states.\n Won't be part of specification grading.\n\n Arguments:\n ----------\n - `state`: The current game state s_t\n where 't' is the current time step.\n See FAQ and class `pacman.GameState`.\n - `belief_states`: A list of Z\n N*M numpy matrices of probabilities\n where N and M are respectively width and height\n of the maze layout and Z is the number of ghosts.\n\n N.B. 
: [0,0] is the bottom left corner of the maze\n        \"\"\"\n        pass\n\n    \n\n    def get_action(self, state):\n        \"\"\"\n        Given a pacman game state, returns a belief state.\n\n        Arguments:\n        ----------\n        - `state`: the current game state.\n                   See FAQ and class `pacman.GameState`.\n\n        Return:\n        -------\n        - A belief state.\n        \"\"\"\n\n        \"\"\"\n        XXX: DO NOT MODIFY THAT FUNCTION !!!\n        Doing so will result in a 0 grade.\n        \"\"\"\n        # Variables are specified in constructor.\n        if self.beliefGhostStates is None:\n            self.beliefGhostStates = state.getGhostBeliefStates()\n        if self.walls is None:\n            self.walls = state.getWalls()\n\n        evidence = self._get_evidence(state)\n        newBeliefStates = self.update_belief_state(evidence,\n                                                    state.getPacmanPosition(),\n                                                    state.data._eaten[1:])\n        self._record_metrics(self.beliefGhostStates, state)\n\n        return newBeliefStates, evidence","repo_name":"anassgallass/Introduction-to-AI","sub_path":"agents/part 3 - reasoning over time/bayesfilter.py","file_name":"bayesfilter.py","file_ext":"py","file_size_in_byte":9031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"39407981315","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, sys, paramiko, time\n\ndate = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n\nclass Connection(object):\n    def __init__(self, ip, user, remote_file, port=22, key='/root/.ssh/id_rsa'):\n        self.ip = ip\n        self.user = user\n        self.port = int(port)\n        self.remote_file = remote_file\n        self.private_key = paramiko.R\n\n\n\n\ndat","repo_name":"bill1818/02-jiansutech-python3","sub_path":"06-deploy-python3/deloy-python-01.py","file_name":"deloy-python-01.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72166788852","text":"import turtle\nimport pandas as pd\n\nscreen = turtle.Screen()\nscreen.title(\"U.S. 
States Game\")\nimage = \"blank_states_img.gif\"\nscreen.addshape(image)\nturtle.shape(image)\nstates = pd.read_csv(\"50_states.csv\")\nguessed_states = []\n\n\nwhile True:\n answer_state = screen.textinput(title=f\"{len(guessed_states)}/50 States Correct\", prompt=\"What's another state's name?\").title()\n if answer_state == \"Exit\":\n break\n if answer_state not in guessed_states:\n if answer_state in states[\"state\"].values:\n guessed_states.append(answer_state.title())\n x = list(states.query(f\"state=='{answer_state}'\")['x'])[0]\n y = list(states.query(f\"state=='{answer_state}'\")['y'])[0]\n coordinate = (x, y)\n state = turtle.Turtle()\n state.penup()\n state.hideturtle()\n state.goto(coordinate)\n state.write(answer_state, font=(\"Verdana\", 10, \"normal\"))\n\n\nscreen.exitonclick()\n\n","repo_name":"matthewgiem/name_the_states","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2655893881","text":"# Definition for singly-linked list.\nfrom typing import List\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def createStack(self, headA: ListNode) -> List[ListNode]:\n stack = []\n\n while headA:\n stack.append(headA)\n headA = headA.next\n\n return stack\n\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n \"\"\"\n Runtime: 152 ms, faster than 95.51% of Python3 online submissions.\n Memory Usage: 29.5 MB, less than 53.96% of Python3 online submissions.\n \"\"\"\n if headA and headB:\n stackA = self.createStack(headA)\n stackB = self.createStack(headB)\n intersect = 0\n\n while stackA and stackB:\n topA = stackA.pop()\n topB = stackB.pop()\n if topA != topB:\n if intersect == 0:\n return None\n else:\n return prev\n else:\n intersect += 1\n prev = topA\n\n return prev\n else:\n return None\n","repo_name":"IAjimi/Leetcode","sub_path":"160_Intersection_of_Two_Linked_Lists.py","file_name":"160_Intersection_of_Two_Linked_Lists.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18213274852","text":"class Solution:\n def checkIfPrerequisite(self, numCourses: int, prerequisites: List[List[int]], queries: List[List[int]]) -> List[bool]:\n ans = []\n # isPrerequisite[i][j] := True if course i is a prerequisite of course j.\n isPrerequisite = [[False] * numCourses for _ in range(numCourses)]\n\n for u, v in prerequisites:\n isPrerequisite[u][v] = True\n\n for k in range(numCourses):\n for i in range(numCourses):\n for j in range(numCourses):\n isPrerequisite[i][j] = isPrerequisite[i][j] or \\\n (isPrerequisite[i][k] and isPrerequisite[k][j])\n\n return [isPrerequisite[u][v] for u, v in queries]\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1462. Course Schedule IV/1462-2.py","file_name":"1462-2.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"16696227839","text":"import string\nimport random\nfrom datetime import date\nimport datetime\n# S = 4\n\nran = ''.join(random.choices(string.ascii_uppercase + string.digits,k=4))\n\nprint(\"ORD_\"+str(ran)) \nprint(\"TRANS_\"+str(ran))\nprint(date.today())\n# import string \n# import random # define the random module \n# S = 10 # number of characters in the string. 
\n# # call random.choices() string module to find the string in Uppercase + numeric data. \n# ran = ''.join(random.choices(string.ascii_uppercase + string.digits, k = S)) \n# print(\"The randomly generated string is : \" + str(ran)) # print the random data \n\ndata = input(\"Enter Date[yyyy-mm-dd]: \")\ndateArr = data.split('-')\nyear = dateArr[0]\nmonth = dateArr[1]\nday = dateArr[2]\n\ndatee = datetime.datetime(int(year),int(month),int(day))\n\nprint(\"Caclulated date: \",datee)","repo_name":"cs13engineer/python_workspace","sub_path":"Ecom/ran.py","file_name":"ran.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7622263432","text":"import numpy as np \nimport os \nimport shutil # Import the shutil library for file operations\n\n# Function to load images into a NumPy array\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n# Function to calculate the area of the displayed logo\ndef calculate_area(x1, y1, x2, y2):\n xDiff = abs(x1 - x2)\n yDiff = abs(y1 - y2)\n area = xDiff * yDiff\n return area\n\n# Function to calculate the shortest and largest area of displayed logos\ndef shortest_longest_area(area_list):\n area_list.sort()\n shortest = area_list[0]\n longest = area_list[-1]\n response = {\n \"shortest\": shortest,\n \"longest\": longest\n }\n return response\n\n# Function to delete and create a folder\ndef delete_and_create_folder(folder_path):\n if os.path.exists(folder_path):\n shutil.rmtree(folder_path) # Remove the folder if it exists\n os.makedirs(folder_path, 0o755) # Create the folder with appropriate permissions\n else:\n os.makedirs(folder_path, 0o755) # Create the folder if it doesn't exist\n","repo_name":"AjNavneet/brand-KPIs_exposure_video_analysis_using_tensorflow","sub_path":"src/ML_Pipeline/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18527614800","text":"import os\nfrom time import time\n\nfrom flask.views import MethodView\nfrom flask import jsonify, g\nfrom pymongo import MongoClient\n\nfrom yutou_library.apis.v1 import api_v1\nfrom yutou_library.apis.v1.auth import auth_required, select_library, can\nfrom yutou_library.models import Book\nfrom yutou_library.apis.v1.schemas import book_schema, books_schema\nfrom yutou_library.libs.error_code import BookNotFound, Success, DeleteSuccess, IllegalISBN\nfrom yutou_library.validators.book import BookForm, BookUpdateForm\nfrom yutou_library.libs.enums import BookStatus\nfrom yutou_library.extensions import db\nfrom yutou_library.spider import BookSpider\nfrom yutou_library.libs.helper import get_legal_isbn\n\n\n# TODO: FINISH BOOK API\n\nclass BookAPI(MethodView):\n decorators = [select_library, auth_required]\n\n def get_book(self, bid):\n user = g.current_user\n lid = user.selecting_library_id\n book = Book.query.filter_by(id=bid, lid=lid).first()\n if book is None:\n return BookNotFound()\n return book\n\n @can(\"READ_BOOK_INFO\")\n def get(self, bid):\n book = self.get_book(bid)\n return jsonify(book_schema(book)), 200\n\n @can(\"UPDATE_BOOK_INFO\")\n def put(self, bid):\n book = self.get_book(bid)\n form = BookUpdateForm().validate_for_api()\n isbn = form.isbn.data\n status = form.status.data\n title = form.title.data\n author = form.author.data\n image_urls 
= form.image_urls.data\n\n with db.auto_commit():\n book.isbn = isbn or book.isbn\n book.status = status or book.status\n book.title = title or book.title\n book.author = author or book.author\n book.image_urls = image_urls or book.image_urls\n return Success()\n\n @can(\"DELETE_BOOK\")\n def delete(self, bid):\n book = self.get_book(bid)\n with db.auto_commit():\n db.session.delete(book)\n return DeleteSuccess()\n\n\nclass BooksAPI(MethodView):\n decorators = [select_library, auth_required]\n\n @can(\"READ_BOOK_INFO\")\n def get(self):\n user = g.current_user\n lid = user.selecting_library_id\n books = Book.query.filter_by(lid=lid).all()\n return jsonify(books_schema(books)), 200\n\n @can(\"ADD_BOOK\")\n def post(self):\n form = BookForm().validate_for_api()\n isbn = form.isbn.data\n title = form.title.data\n author = form.author.data\n image_urls = form.image_urls.data\n lid = g.current_user.selecting_library_id\n\n with db.auto_commit():\n book = Book(lid=lid,\n isbn=isbn,\n status=BookStatus.A,\n title=title,\n author=author,\n image_urls=image_urls)\n db.session.add(book)\n return Success()\n\n\nclass BookDetailAPI(MethodView):\n def __init__(self):\n super().__init__()\n\n uri = os.getenv(\"MONGO_URI\", \"mongodb://test:test@localhost:27017\")\n database = os.getenv(\"MONGO_DATABASE\", \"test\")\n collection = os.getenv(\"MONGO_COLLECTION\", \"book\")\n\n self.client = MongoClient(uri)\n self.douban = self.client[database]\n self.book = self.douban[collection]\n self.spider = BookSpider()\n\n def _need_to_update(self, doc):\n if \"_tm\" in doc:\n current_time = time()\n update_time = doc[\"_tm\"]\n return (current_time - update_time) > 10 * 24 * 60 * 60 * 1000\n return True\n\n def get(self, isbn):\n # TODO: WRITE BOOK SPIDER\n isbn = get_legal_isbn(str(isbn))\n if not isbn:\n return IllegalISBN()\n doc = self.book.find_one({\"_id\": isbn})\n if doc is None:\n doc = self.spider.get_book_info(isbn)\n if doc is not None and len(doc) > 0:\n self.book.insert_one(doc)\n else:\n return BookNotFound()\n else:\n if self._need_to_update(doc):\n doc = self.spider.get_book_info(isbn)\n self.book.update_one({\"_id\": isbn}, doc)\n return jsonify(doc)\n\n\napi_v1.add_url_rule(\"/book/\", view_func=BookAPI.as_view(\"book_api\"), methods=[\"GET\", \"PUT\", \"DELETE\"])\napi_v1.add_url_rule(\"/book\", view_func=BooksAPI.as_view(\"books_api\"), methods=[\"GET\", \"POST\"])\napi_v1.add_url_rule(\"/book//detail\", view_func=BookDetailAPI.as_view(\"book_detail_api\"), methods=[\"GET\"])\n","repo_name":"Mananananana/yutou_library","sub_path":"yutou_library/apis/v1/resources/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32519017569","text":"#!/usr/bin/python2.7\n\n'''\nunit test for same tree\n'''\nfrom src.same_tree import solution\n\n__author__ = 'hao.chen'\n\nimport unittest\n\nclass is_same_tree_tests(unittest.TestCase):\n\n def test_same_tree(self):\n n1 = tree_node(3)\n n1left = tree_node(0)\n n1right = tree_node(2)\n n1.left = n1left\n n1.right = n1right\n n2 = tree_node(1)\n n2left = tree_node(0)\n n2right = tree_node(2)\n n2.left = n2left\n n2.right = n2right\n sol = solution()\n self.failUnless(sol.is_same_tree(n1, n2) == False)\n\ndef main():\n unittest.main()\n\n\nif __name__ == \"__main__\":\n 
main()\n\n","repo_name":"fifa007/Leetcode","sub_path":"test/test_same_tree.py","file_name":"test_same_tree.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19736315111","text":"class Solution:\n def jump(self, nums: List[int]) -> int:\n jump = 0\n l, r = 0, 0 \n while r < len(nums)-1:\n farthest = 0\n for i in range(l, r+1):\n farthest = max(farthest, i + nums[i])\n l = r + 1\n r = farthest\n jump += 1\n return jump","repo_name":"bhavikjain403/LeetCode","sub_path":"0045-jump-game-ii/0045-jump-game-ii.py","file_name":"0045-jump-game-ii.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"69937377654","text":"# CS114 Spring 2020 Programming Assignment 3\n# N-gram Language Models\n\nfrom collections import defaultdict\nfrom languageModel import LanguageModel\nimport numpy as np\nfrom scipy.sparse import lil_matrix\n\n\nclass Bigram(LanguageModel):\n\n def __init__(self):\n # self.word_dict[word] = index\n self.word_dict = {}\n # self.total[previous_word] = count(previous_word)\n self.total = None\n self.prob_counter = None\n\n '''\n Trains a bigram language model on a training set.\n Specifically, fills in self.prob_counter such that:\n self.prob_counter[previous_word][word] = P(word|previous_word)\n '''\n def train(self, trainingSentences):\n word_counts = defaultdict(lambda: defaultdict(int))\n\n # iterate over training sentences\n for sentence in trainingSentences:\n for i, word in enumerate(sentence):\n if i == 0:\n word_counts[LanguageModel.START][word] += 1\n else:\n word_counts[sentence[i-1]][word] += 1 # word_counts[previous word][word]\n word_counts[sentence[-1]][LanguageModel.STOP] += 1\n\n # Deal with the unknown word counts\n for previous_word in list(word_counts.keys()):\n word_counts[previous_word][LanguageModel.UNK] += 1\n word_counts[LanguageModel.UNK][previous_word] += 1 # this will add an extra value [UNKNOWN][START]\n word_counts[LanguageModel.UNK][LanguageModel.UNK] += 1\n del word_counts[LanguageModel.UNK][LanguageModel.START]\n\n self.prob_counter = lil_matrix((len(word_counts), len(word_counts)))\n # sort words alphabetically\n # To simplify the procedure, set index 0 for START and STOP (will delete START afterwards)\n self.word_dict[LanguageModel.STOP] = 0\n self.word_dict[LanguageModel.START] = 0\n # set index for other words\n temp_word_list = sorted(word_counts)\n temp_word_list.remove(LanguageModel.START)\n for i, index_word in enumerate(temp_word_list):\n self.word_dict[index_word] = i + 1\n\n for previous_word, word_dict in word_counts.items():\n for word in word_dict:\n i = self.word_dict[previous_word]\n j = self.word_dict[word]\n self.prob_counter[i, j] = word_counts[previous_word][word]\n\n del self.word_dict[LanguageModel.START] # START should not occur in word dict\n # normalize counts to probabilities\n self.total = self.prob_counter.sum(axis=1)\n # to keep matrix sparse, use multiplication instead of division\n # also convert matrix back to lil format\n self.prob_counter = self.prob_counter.multiply(1 / self.total).tolil()\n return\n\n '''\n Returns the probability of the word at index, according to the model, within\n the specified sentence.\n '''\n def getWordProbability(self, sentence, index):\n # Note that START is not in self.word_dict\n previous_word_index = None\n if index == len(sentence):\n word = LanguageModel.STOP\n previous_word = 
sentence[-1]\n else:\n word = sentence[index]\n if index == 0:\n previous_word = LanguageModel.START\n previous_word_index = 0\n else:\n previous_word = sentence[index-1]\n\n if word not in self.word_dict:\n word = LanguageModel.UNK\n\n word_index = self.word_dict[word]\n\n if (previous_word not in self.word_dict) and (previous_word != LanguageModel.START):\n previous_word = LanguageModel.UNK\n\n if previous_word_index is None:\n previous_word_index = self.word_dict[previous_word]\n\n return self.prob_counter[previous_word_index, word_index]\n\n '''\n Returns, for a given context, a random word, according to the probabilities\n in the model.\n '''\n def generateWord(self, context):\n if context:\n previous_word = context[-1]\n else:\n previous_word = LanguageModel.START\n\n if (previous_word not in self.word_dict) and (previous_word != LanguageModel.START):\n previous_word = LanguageModel.UNK\n\n if previous_word == LanguageModel.START:\n previous_word_index = 0\n else:\n previous_word_index = self.word_dict[previous_word]\n\n probs = self.prob_counter[previous_word_index].toarray().ravel()\n word_list = sorted(self.word_dict.items(), key=lambda item: item[1])\n word_list = [k[0] for k in word_list]\n return np.random.choice(word_list, p=probs)\n","repo_name":"GYHenryTT/Computational-Linguistic","sub_path":"PA3/bigram.py","file_name":"bigram.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2863908810","text":"from flask import jsonify\nfrom dao.admin import AdminDAO\nclass AdministratorHandler:\n\n def build_admin_dict(self, row):\n result = {}\n result['admin_id'] = row[0]\n result['permission_key'] = row[1]\n result['p_id'] = row[2]\n return result\n\n def build_resource_dict(self, row):\n result = {}\n result['r_id'] = row[0]\n result['r_type'] = row[1]\n result['r_quantity'] = row[2]\n result['r_location'] = row[3]\n if result['r_type'] == 'Water':\n result['water_type'] = row[4]\n result['measurement_unit'] = row[5]\n result['r_availability'] = row[6]\n result['admin_id']= row[7]\n elif result['r_type'] == 'Fuel':\n result['fuel_type'] = row[4]\n result['fuel_octane_rating'] = row[5]\n result['r_availability'] = row[6]\n result['admin_id'] = row[7]\n elif result['r_type'] == 'Food':\n result['food_type'] = row[4]\n result['r_availability'] = row[5]\n result['admin_id'] = row[6]\n else:\n result['r_availability'] = row[4]\n result['admin_id'] = row[5]\n return result\n\n def getAllAdmin(self):\n dao = AdminDAO()\n admin_list = dao.getAllAdmin()\n result_list = []\n for row in admin_list:\n result = self.build_admin_dict(row)\n result_list.append(result)\n return jsonify(AdminList=result_list)\n\n def getAdminById(self, admin_id):\n dao = AdminDAO()\n admin = dao.getAdminById(admin_id)\n if not admin:\n return jsonify(Error=\"Admin Not Found\"), 404\n else:\n admin = self.build_admin_dict(admin)\n return jsonify(Admin=admin)\n\n def getResourcesByAdminId(self, admin_id):\n dao = AdminDAO()\n admin1 = dao.getAdminById(admin_id)\n if not admin1:\n return jsonify(Error=\"Admin Not Found\"), 404\n resources_list = dao.getResourcesByAdminId(admin_id)\n result_list = []\n for row in resources_list:\n result = self.build_resource_dict(row)\n result_list.append(result)\n return jsonify(ResourcesByAdminID=result_list)\n\n def insertAdmin(self, form):\n if form and len(form) == 2:\n permission_key = form['permission_key']\n p_id = form['p_id']\n\n if permission_key and p_id:\n dao = 
AdminDAO()\n                admin_id = dao.insertAdmin(permission_key, p_id)\n                result = {}\n                result['admin_id'] = admin_id\n                result['permission_key'] = permission_key\n                result['p_id'] = p_id\n                return jsonify(Admin=result), 201\n            else:\n                return jsonify('Unexpected attributes in post request'), 401\n        else:\n            return jsonify(Error=\"Malformed post request\"), 400\n\n    def deleteAdmin(self, admin_id):\n        dao = AdminDAO()\n        admin = dao.getAdminById(admin_id)\n        if not admin:\n            return jsonify(Error=\"Admin Not Found\"), 404\n        dao.deleteAdmin(admin_id)\n        return jsonify(DeleteStatus=\"OK\"), 200\n\n    def updateAdmin(self, admin_id, form):\n        dao = AdminDAO()\n        admin = dao.getAdminById(admin_id)\n        if not admin:\n            return jsonify(Error=\"Admin Not Found\"), 404\n        if len(form) != 2:\n            return jsonify(Error=\"Malformed update request\"), 400\n        else:\n            permission_key = form['permission_key']\n            p_id = form['p_id']\n            if permission_key and p_id:\n                dao.updateAdmin(admin_id, permission_key, p_id)\n                result = {}\n                result['admin_id'] = admin_id\n                result['permission_key'] = permission_key\n                result['p_id'] = p_id\n                return jsonify(Admin=result), 200\n            else:\n                return jsonify(Error=\"Unexpected attributes in update request\"), 400\n\n    def manageResource(self, admin_id, form):\n        dao = AdminDAO()\n        if not dao.getAdminById(admin_id):\n            return jsonify(Error=\"Admin not found.\"), 404\n        else:\n            if len(form) != 1:\n                return jsonify(Error=\"Malformed update request\"), 400\n            else:\n                rid = form['r_id']\n                if rid:\n                    dao.manageResource(admin_id, rid)\n                    result = {}\n                    result['admin_id'] = admin_id\n                    result['r_id'] = rid\n                    return jsonify(Manage=result), 200\n                else:\n                    return jsonify(Error=\"Unexpected attributes in insert request\"), 400","repo_name":"Statedbump/Emergency_Inventory_System","sub_path":"handlers/administratorhandler.py","file_name":"administratorhandler.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"36099493736","text":"# Librerías\nfrom flask import Flask, jsonify, request, send_from_directory\nfrom flask_cors import CORS\nimport paho.mqtt.client as mqtt\nfrom dotenv import load_dotenv\nload_dotenv()\nimport os\n\n# Nueva instancia de Flask\napp = Flask(__name__)\nCORS(app, origins=[os.getenv('FRONTEND_SERVER')], methods=['GET', 'POST'], headers=['Content-Type', 'Authorization'])\n\n# Variables\nshapes = ['Bird','Cat','Fish','House','Plane','Rocket','Swan','Tree']\npiecesDict = {\n    'r-blue' : '^',\n    's-yellow' : 'I',\n    't-purple' : '7',\n    't-brown' : '3',\n    't-green' : 't',\n    't-orange' : 'U',\n    't-red' : 'r',\n}\nmqtt_client = mqtt.Client()\nbroker_address = os.getenv('BROCKER_MQRTT')\nmqtt_client.connect(broker_address, 1883, 60)\n\n# Funciones\ndef format_message(message):\n    hashed_msg = [piecesDict[piece] for piece in message.split('/')]\n    return hashed_msg\n\ndef send_mqtt(topic, message):\n    mqtt_client.publish(topic, message)\n    print(f'MQTT => Petición {topic} enviada')\n    print(f'... 
{message}')\n\n@app.route('/')\ndef initial_server():\n return 'Backend is working'\n\n# Cards API Route\n@app.route('/cards')\ndef get_cards():\n # Lista de cartas a renderizar\n return {\n 'Shapes': shapes,\n 'Pieces': list(piecesDict),\n }\n@app.route('/cards//')\ndef get_image(folder, image):\n # Retornamos imagen de carta\n return send_from_directory('static/'+folder, image)\n\n# SendMqtt API Route\n@app.route('/sendMqtt', methods=['POST'])\ndef send_message():\n message = request.json.get('message')\n message['pieces'] = format_message(message['pieces'])\n send_mqtt('assembly', message['shape'] + '/' + '/'.join(message['pieces']))\n return jsonify('String recibido con éxito')\n\n# Ejecución de la aplicación\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"JesusAFD19/Smart_Factory","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22208296070","text":"import numpy as np\n\nfrom equation import Grid, Equation\n\n\nclass ButcherTable:\n def __init__(self, a: np.ndarray, b: np.ndarray, c: np.ndarray):\n self.s = a.shape[0]\n assert b.size == self.s, 'size of b differs'\n assert c.size == self.s, 'size of c differs'\n\n self.a = a\n self.b = b\n self.c = c\n\n\nclass RungeKuttaMethod:\n def __init__(self, table: ButcherTable):\n self.table = table\n\n def __call__(self, equation: Equation, grid: Grid):\n y = np.zeros(grid.n_points)\n y[0] = equation.y_0\n\n for n in range(1, grid.n_points):\n y[n] = y[n - 1]\n k = np.zeros(self.table.s)\n\n for i in range(self.table.s):\n x_ = grid[n - 1] + grid.h * self.table.c[i]\n y_ = y[n - 1]\n for j in range(i):\n y_ += self.table.a[i, j] * grid.h * k[j]\n k[i] = equation.f(x_, y_)\n\n for i in range(self.table.s):\n y[n] += grid.h * k[i] * self.table.b[i]\n\n return y\n\n\ndef modified_euler() -> ButcherTable:\n a = np.array([[0.0, 0.0],\n [0.5, 0.0]])\n\n b = np.array([0.0, 1.0])\n\n c = np.array([0.0, 0.5])\n\n return ButcherTable(a, b, c)\n\n\ndef euler_with_count() -> ButcherTable:\n a = np.array([[0.0, 0.0],\n [1.0, 0.0]])\n b = np.array([0.5, 0.5])\n c = np.array([0.0, 1.0])\n return ButcherTable(a, b, c)\n\n\ndef heun_1() -> ButcherTable:\n a = np.array([[0.0, 0.0, 0.0],\n [1 / 3, 0.0, 0.0],\n [0.0, 2 / 3, 0.0]])\n b = np.array([1 / 4, 0.0, 3 / 4])\n c = np.array([0.0, 1 / 3, 2 / 3])\n return ButcherTable(a, b, c)\n\n\ndef heun_2() -> ButcherTable:\n a = np.array([[0.0, 0.0, 0.0],\n [2 / 3, 0.0, 0.0],\n [-1 / 3, 1.0, 0.0]])\n b = np.array([1 / 4, 1 / 2, 1 / 4])\n c = np.array([0.0, 2 / 3, 2 / 3])\n return ButcherTable(a, b, c)\n\n\ndef heun_3() -> ButcherTable:\n a = np.array([[0.0, 0.0, 0.0],\n [1 / 2, 0.0, 0.0],\n [-1.0, 2.0, 0.0]])\n b = np.array([1 / 6, 2 / 3, 1 / 6])\n c = np.array([0.0, 1 / 2, 1.0])\n return ButcherTable(a, b, c)\n\n\ndef runge_kutta_4() -> ButcherTable:\n a = np.array([[0.0, 0.0, 0.0, 0.0],\n [0.5, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0]])\n b = np.array([1 / 6, 1 / 3, 1 / 3, 1 / 6])\n c = np.array([0.0, 0.5, 0.5, 1.0])\n return ButcherTable(a, b, c)\n","repo_name":"grim-yawn/CompMath","sub_path":"ODE/runge_kutta_methods.py","file_name":"runge_kutta_methods.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36504693733","text":"import numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nyears = [2015, 2016, 2017, 2018, 2019, 
2020]\r\noverallHealth = [11000, 11236, 12416, 11051, 12185, 13805]\r\n\r\nz = np.polyfit(years, overallHealth, 2)\r\np = np.poly1d(z)\r\n\r\nyears.append(2021)\r\noverallHealth.append(p(2021))\r\n\r\nplt.plot(years, p(years), \"red\")\r\nplt.scatter(years, overallHealth, s=100)\r\n\r\nplt.xlabel(\"Years\")\r\nplt.ylabel(\"Overall Health\")\r\n\r\nplt.show()\r\n","repo_name":"gloryc34/Alzheimer-s_DataMining","sub_path":"OverallHealthPrediction.py","file_name":"OverallHealthPrediction.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41025618194","text":"import random\r\nfrom time import sleep\r\n\r\ndef guess():\r\n User_guess = \"\"\r\n word = [\"ROCK\", \"PAPER\", \"SCISSORS\", \"ELEPHANT\", \"YELLOW\", \"NAME\", \"BLUE\", \"PLATE\"]\r\n guess_count = 0\r\n number_of_guesses = 7\r\n out_of_guesses = False\r\n random_index = random.randrange(len(word))\r\n\r\n while User_guess != word[random_index] and not(out_of_guesses):\r\n if guess_count < number_of_guesses:\r\n User_guess = input(\"Enter Guess: \").upper()\r\n guess_count += 1\r\n else:\r\n out_of_guesses = True\r\n\r\n if out_of_guesses:\r\n print(\"YOU ARE OUT OF GUESSES. YOU LOSE\")\r\n sleep(2.0)\r\n print(\"THE REAL WORD WAS \" + word[random_index])\r\n else:\r\n print(\"YOU WIN!\")\r\n print(\"THE REAL WORD WAS \" + word[random_index])\r\n\r\n sleep(2.0)\r\n\r\n\r\ndef replay():\r\n while True:\r\n Replay = input(\"Would you like to play again? Yes or No: \").upper()\r\n if Replay == \"YES\":\r\n guess()\r\n elif Replay == \"NO\":\r\n break\r\n \r\n \r\nguess()\r\nreplay()","repo_name":"SamiSaifudin/Guessing-Game","sub_path":"Guessing_game.py","file_name":"Guessing_game.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32745623168","text":"'''\nUtility functions for querying elasticsearch.\n\ndate: 24.06.18\nauthor: Alexander Soen\n'''\n\nfrom datetime import datetime\n\ndef paper_info_to_cache_json(paper_info, additional_tag={}):\n ''' Cache a paper_info dictionary.\n '''\n # Setup meta data\n meta_json = dict()\n meta_json['_id'] = paper_info['PaperId']\n meta_json['_index'] = 'paper_info'\n meta_json['_type'] = 'doc'\n\n # Source data\n source = paper_info\n source['CreatedDate'] = datetime.now()\n source.update(additional_tag)\n\n # Return join of data\n meta_json['_source'] = source\n return meta_json\n\n\ndef field_del(val_dict, field):\n ''' Helper to delete a field in a dictionary without worry about a\n KeyError.\n '''\n try:\n del val_dict[field]\n except KeyError:\n pass\n\n return val_dict\n\n\ndef chunker(elements, batch_size):\n ''' Standard chunker.\n '''\n size = len(elements)\n for i in range(0, size, batch_size):\n yield elements[i: min(size, i+batch_size)]\n","repo_name":"csmetrics/influencemap","sub_path":"core/search/query_utility.py","file_name":"query_utility.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"42787810205","text":"def estritamente_crescente(lista):\n if lista==[]:\n return []\n \n maximo=lista[0]\n lista2=[maximo]\n \n if lista!=[]:\n for i in range(len(lista)-1):\n if lista[i+1]> maximo:\n maximo=lista[i+1]\n lista2.append(lista[i+1])\n\n\n \n return 
lista2\n","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_169/ch47_2020_04_12_23_05_57_885848.py","file_name":"ch47_2020_04_12_23_05_57_885848.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74002553333","text":"# -*- coding: utf-8 -*-\nfrom edi.checkapp import _\nfrom Products.Five.browser import BrowserView\nfrom plone import api as ploneapi\nfrom edi.checkapp.content.frage import possibleQuestionsOrPages, SiguvColors\nfrom edi.checkapp.views.formsnippets import textline, textline_unit, textarea, select, radiobutton, checkbox\nfrom zope.component import getUtility\nfrom plone.i18n.normalizer.interfaces import IIDNormalizer\n\nfrom Acquisition import aq_inner\nfrom zope.component import getUtility\nfrom zope.intid.interfaces import IIntIds\nfrom zope.security import checkPermission\nfrom zc.relation.interfaces import ICatalog\nfrom plone.app.linkintegrity.handlers import referencedRelationship\nfrom plone.i18n.normalizer import idnormalizer\n\nclass FiveRulesView(BrowserView):\n\n def back_references(self, source_object, attribute_name):\n \"\"\"\n Return back references from source object on specified attribute_name\n \"\"\"\n catalog = getUtility(ICatalog)\n intids = getUtility(IIntIds)\n result = []\n for rel in catalog.findRelations(\n dict(to_id=intids.getId(aq_inner(source_object)),\n from_attribute=attribute_name)\n ):\n obj = intids.queryObject(rel.from_id)\n if obj is not None and checkPermission('zope2.View', obj):\n result.append(obj)\n return result\n\n def create_kopffragen(self):\n normalizer = getUtility(IIDNormalizer)\n kopffragen = ''\n for i in self.context.kopffragen:\n title = i.get('frage')\n id = normalizer.normalize(title)\n fieldclass = 'edi__checkapp'\n typ = i.get('antworttyp')\n einheit = i.get('einheit')\n optionen = i.get('optionen')\n if typ == 'radio':\n kopffragen += radiobutton(id, fieldclass, title, optionen)\n elif typ == 'checkbox':\n kopffragen += checkbox(id, fieldclass, title, optionen)\n elif typ in ['text', 'date', 'datetime-local']:\n kopffragen += textline(id, fieldclass, title, typ)\n elif typ == 'number' and not einheit:\n kopffragen += textline(id, fieldclass, title, typ)\n elif typ == 'number' and einheit:\n kopffragen += textline_unit(id, fieldclass, title, typ, einheit)\n elif typ == 'textarea':\n kopffragen += textarea(id, fieldclass, title)\n return kopffragen\n\n\n def get_content(self):\n themen = {}\n depends = []\n for i in self.context.themenbereiche:\n if '#' in i:\n thema = i.split('#')[1]\n themen[thema] = []\n else:\n themen[i] = []\n for k in self.context.getFolderContents():\n if k.portal_type == 'Fragestellung':\n obj = k.getObject()\n entry = {}\n entry['id'] = 'edi'+obj.UID()\n entry['class'] = \"\"\n entry['title'] = obj.title\n entry['frage'] = u''\n entry['placeholder'] = obj.platzhalter\n entry['default'] = getattr(obj, 'default', '')\n entry['hidden'] = getattr(obj, 'hidden', False)\n entry['ausrichtung'] = obj.ausrichtung\n entry['einheit'] = obj.einheit\n entry['typ'] = obj.antworttyp\n entry['required'] = obj.required\n if obj.getId() in depends:\n entry['class'] = \"collapse\"\n if obj.antworttyp in ['radio', 'checkbox']:\n entry['title'] = obj.title\n entry['optionen'] = obj.listFolderContents(contentFilter={\"portal_type\" : \"Antwortoption\"})\n if obj.frage:\n entry['frage'] = obj.frage.output\n for opt_object in obj.listFolderContents(contentFilter={\"portal_type\" : 
\"Antwortoption\"}):\n if opt_object.dep_fields:\n if opt_object.dep_fields.to_object:\n depends.append(opt_object.dep_fields.to_object.getId())\n entry['snippet'] = ploneapi.content.get_view('fragestellung-view', obj, self.request).create_formmarkup() \n entry['editurl'] = obj.absolute_url() + '/edit'\n if obj.thema in themen:\n themen[obj.thema].append(entry)\n elif k.portal_type == 'Hinweistext':\n obj = k.getObject()\n entry = {}\n entry['id'] = 'edi'+obj.UID()\n entry['title'] = \"\"\n entry['showtitle'] = False\n entry['class'] = \"\"\n entry['typ'] = 'HTML'\n entry['snippet'] = obj.hinweis.output\n entry['required'] = False\n if hasattr(obj, 'thema'):\n if obj.thema in themen:\n themen[obj.thema].append(entry)\n return themen\n\n def get_themenbereiche(self):\n themenbereiche = []\n for i in self.context.themenbereiche:\n if '#' in i:\n elements = i.split('#')\n if len(elements) == 2:\n elements.append(idnormalizer.normalize(elements[1]))\n elements.append(u'Block')\n elif len(elements) == 3:\n elements.insert(2, idnormalizer.normalize(elements[1]))\n themenbereiche.append(elements)\n else:\n themenbereiche.append(('', i, idnormalizer.normalize(i), u'Block')) \n return themenbereiche\n","repo_name":"kraeks/edi.checkapp","sub_path":"src/edi/checkapp/views/five_rules_view.py","file_name":"five_rules_view.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4561393047","text":"with open ('words.txt', 'r') as text:\n list1 = text.read().split(\",\")\n\nmax_len = 0\n\nfor index, value in enumerate(list1):\n n_val = value.strip('\\\"')\n list1[index] = n_val\n if len(n_val) > max_len:\n max_len = len(n_val)\n\nchars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ntri_nums = []\n\nmax_score = max_len * 26\n\nmax_int = ( -1 + (1 + 8*max_score)**.5)/2 # By the quadratic formula, the number of iterations to achieve the max triangle number possible.\n\nfor i in range(1, int(max_int)+1):\n tri_nums.append(\n .5 * (i) * (i + 1)\n )\n\ncounter = 0\n\nfor word in list1:\n score = 0\n \n for character in word:\n ind = chars.find(character)\n score += ind + 1\n\n if score in tri_nums:\n counter += 1\n\nprint(counter) # 162 \n","repo_name":"brianfl/Euler","sub_path":"42.py","file_name":"42.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5060920665","text":"import llog\n\nimport struct\nimport logging\n\nimport sshtype\n\nSSH_MSG_DISCONNECT = 1\nSSH_MSG_IGNORE = 2\nSSH_MSG_UNIMPLEMENTED = 3\nSSH_MSG_DEBUG = 4\nSSH_MSG_SERVICE_REQUEST = 5\nSSH_MSG_SERVICE_ACCEPT = 6\nSSH_MSG_KEXINIT = 20\nSSH_MSG_NEWKEYS = 21\n\nSSH_MSG_USERAUTH_REQUEST = 50\nSSH_MSG_USERAUTH_FAILURE = 51\nSSH_MSG_USERAUTH_SUCCESS = 52\nSSH_MSG_USERAUTH_BANNER = 53\n\nSSH_MSG_USERAUTH_PK_OK = 60\n\nSSH_MSG_GLOBAL_REQUEST = 80\nSSH_MSG_REQUEST_SUCCESS = 81\nSSH_MSG_REQUEST_FAILURE = 82\nSSH_MSG_CHANNEL_OPEN = 90\nSSH_MSG_CHANNEL_OPEN_CONFIRMATION = 91\nSSH_MSG_CHANNEL_OPEN_FAILURE = 92\nSSH_MSG_CHANNEL_WINDOW_ADJUST = 93\nSSH_MSG_CHANNEL_DATA = 94\nSSH_MSG_CHANNEL_EXTENDED_DATA = 95\nSSH_MSG_CHANNEL_EOF = 96\nSSH_MSG_CHANNEL_CLOSE = 97\nSSH_MSG_CHANNEL_REQUEST = 98\nSSH_MSG_CHANNEL_SUCCESS = 99\nSSH_MSG_CHANNEL_FAILURE = 100\n\nSSH_MSG_CHANNEL_IMPLICIT_WRAPPER = 200\n\nlog = logging.getLogger(__name__)\n\nclass SshPacket():\n @staticmethod\n def parse_type(buf, offset=0):\n return struct.unpack_from(\"B\", buf, offset)[0]\n\n def __init__(self, 
packet_type=None, buf=None, offset=0):\n self.buf = buf\n self.packet_type = packet_type\n\n if not buf:\n return\n\n self._packet_type = packet_type # Expected packet_type.\n\n self.offset = offset\n\n self.parse();\n\n def parse(self):\n offset = self.offset\n\n self.packet_type = struct.unpack_from(\"B\", self.buf, offset)[0]\n\n if self._packet_type and self.packet_type != self._packet_type:\n raise Exception(\"Expecting packet type [{}] but got [{}].\"\\\n .format(self._packet_type, self.packet_type))\n\n return offset + 1\n\n def encode(self):\n nbuf = bytearray()\n nbuf += struct.pack(\"B\", self.packet_type & 0xFF)\n\n self.buf = nbuf\n\n return nbuf\n\nclass SshKexInitMessage(SshPacket):\n def __init__(self, buf = None):\n if buf == None:\n self.cookie = None\n self.kex_algorithms = \"\"\n self.server_host_key_algorithms = \"\"\n self.encryption_algorithms_client_to_server = \"\"\n self.encryption_algorithms_server_to_client = \"\"\n self.mac_algorithms_client_to_server = \"\"\n self.mac_algorithms_server_to_client = \"\"\n self.compression_algorithms_client_to_server = \"\"\n self.compression_algorithms_server_to_client = \"\"\n self.languages_client_to_server = \"\"\n self.languages_server_to_client = \"\"\n self.first_kex_packet_follows = \"\"\n\n super().__init__(SSH_MSG_KEXINIT, buf)\n\n def parse(self):\n super().parse()\n\n i = 17;\n self.cookie = self.buf[1:i]\n\n l, v = sshtype.parseNameList(self.buf[i:])\n self.kex_algorithms = v\n i += l\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += self.cookie\n nbuf += sshtype.encodeNameList(self.kex_algorithms)\n\n nbuf += sshtype.encodeNameList(self.server_host_key_algorithms)\n nbuf += sshtype.encodeNameList(self.encryption_algorithms_client_to_server)\n nbuf += sshtype.encodeNameList(self.encryption_algorithms_server_to_client)\n nbuf += sshtype.encodeNameList(self.mac_algorithms_client_to_server)\n nbuf += sshtype.encodeNameList(self.mac_algorithms_server_to_client)\n nbuf += sshtype.encodeNameList(self.compression_algorithms_client_to_server)\n nbuf += sshtype.encodeNameList(self.compression_algorithms_server_to_client)\n nbuf += sshtype.encodeNameList(self.languages_client_to_server)\n nbuf += sshtype.encodeNameList(self.languages_server_to_client)\n nbuf += struct.pack(\"?\", self.first_kex_packet_follows)\n nbuf += struct.pack(\">L\", 0)\n\n return nbuf\n\nclass SshKexdhInitMessage(SshPacket):\n def __init__(self, buf = None):\n if buf == None:\n self.e = None\n\n super().__init__(30, buf)\n\n def getE(self):\n return self.e\n\n def setE(self, e):\n self.e = e\n\n def parse(self):\n super().parse()\n\n self.e = sshtype.parseMpint(self.buf[1:])[1]\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += sshtype.encodeMpint(self.e)\n\n return nbuf\n\nclass SshKexdhReplyMessage(SshPacket):\n def __init__(self, buf = None):\n if buf == None:\n self.host_key = None\n self.f = None\n self.signature = None\n\n super().__init__(31, buf)\n\n def getHostKey(self):\n return self.host_key\n\n def setHostKey(self, val):\n self.host_key = val\n\n def getF(self):\n return self.f\n\n def setF(self, f):\n self.f = f\n\n def getSignature(self):\n return self.signature\n\n def setSignature(self, val):\n self.signature = val\n\n def parse(self):\n super().parse()\n\n i = 1\n l, self.host_key = sshtype.parseBinary(self.buf[i:])\n i += l\n l, self.f = sshtype.parseMpint(self.buf[i:])\n i += l\n l, self.signature = sshtype.parseBinary(self.buf[i:])\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += 
sshtype.encodeBinary(self.host_key)\n        nbuf += sshtype.encodeMpint(self.f)\n        nbuf += sshtype.encodeBinary(self.signature)\n\n        return nbuf\n\nclass SshNewKeysMessage(SshPacket):\n    def __init__(self, buf = None):\n        super().__init__(SSH_MSG_NEWKEYS, buf)\n\n    def encode(self):\n        nbuf = super().encode()\n        return nbuf\n\nclass SshServiceRequestMessage(SshPacket):\n    def __init__(self, buf = None):\n        if buf == None:\n            self.service_name = None\n\n        super().__init__(SSH_MSG_SERVICE_REQUEST, buf)\n\n    def parse(self):\n        super().parse()\n\n        i = 1\n        l, self.service_name = sshtype.parseString(self.buf[i:])\n\n    def encode(self):\n        nbuf = super().encode()\n\n        nbuf += sshtype.encodeString(self.service_name)\n\n        return nbuf\n\nclass SshServiceAcceptMessage(SshPacket):\n    def __init__(self, buf = None):\n        if buf == None:\n            self.service_name = None\n\n        super().__init__(SSH_MSG_SERVICE_ACCEPT, buf)\n\n    def parse(self):\n        super().parse()\n\n        i = 1\n        l, self.service_name = sshtype.parseString(self.buf[i:])\n\n    def encode(self):\n        nbuf = super().encode()\n\n        nbuf += sshtype.encodeString(self.service_name)\n\n        return nbuf\n\nclass SshDisconnectMessage(SshPacket):\n    def __init__(self, buf = None):\n        super().__init__(SSH_MSG_DISCONNECT, buf)\n\n    def parse(self):\n        super().parse()\n\n        i = 1\n        self.reason_code = struct.unpack(\">L\", self.buf[i:i+4])[0]\n        i += 4\n        l, self.description = sshtype.parseString(self.buf[i:])\n        i += l\n        l, self.language_tag = sshtype.parseString(self.buf[i:])\n\n    def encode(self):\n        nbuf = super().encode()\n\n        nbuf += struct.pack(\">L\", self.reason_code)\n        nbuf += sshtype.encodeString(self.description)\n        nbuf += sshtype.encodeString(self.language_tag)\n\n        return nbuf\n\nclass SshUserauthRequestMessage(SshPacket):\n    def __init__(self, buf = None):\n        if buf == None:\n            self.user_name = None\n            self.service_name = None\n            self.method_name = None\n\n        super().__init__(SSH_MSG_USERAUTH_REQUEST, buf)\n\n    def parse(self):\n        super().parse()\n\n        i = 1\n        l, self.user_name = sshtype.parseString(self.buf[i:])\n        i += l\n        l, self.service_name = sshtype.parseString(self.buf[i:])\n        i += l\n        l, self.method_name = sshtype.parseString(self.buf[i:])\n        i += l\n\n        if self.method_name == \"publickey\":\n            self.signature_present = struct.unpack(\"?\", self.buf[i:i+1])[0]\n            i += 1\n            l, self.algorithm_name = sshtype.parseString(self.buf[i:])\n            i += l\n            l, self.public_key = sshtype.parseBinary(self.buf[i:])\n            if self.signature_present:\n                i += l\n                l, self.signature = sshtype.parseBinary(self.buf[i:])\n                self.signature_length = l\n\n    def encode(self):\n        nbuf = super().encode()\n\n        nbuf += sshtype.encodeString(self.user_name)\n        nbuf += sshtype.encodeString(self.service_name)\n        nbuf += sshtype.encodeString(self.method_name)\n        \n        if self.method_name == \"publickey\":\n            nbuf += struct.pack(\"B\", self.signature_present)\n            nbuf += sshtype.encodeString(self.algorithm_name)\n            nbuf += sshtype.encodeBinary(self.public_key)\n            # Leave signature for caller to append, as they need this encoded\n            # data to sign.\n\n        return nbuf\n\nclass SshUserauthFailureMessage(SshPacket):\n    def __init__(self, buf = None):\n        if buf == None:\n            self.auths = None\n            self.partial_success = None\n\n        super().__init__(SSH_MSG_USERAUTH_FAILURE, buf)\n\n    def parse(self):\n        super().parse()\n\n        i = 1\n        l, self.auths = sshtype.parseNameList(self.buf[i:])\n        i += l\n        self.partial_success = struct.unpack(\"?\", self.buf[i:i+1])[0]\n\n    def encode(self):\n        nbuf = super().encode()\n\n        nbuf += sshtype.encodeNameList(self.auths)\n        nbuf += struct.pack(\"?\", 
self.partial_success)\n\n return nbuf\n\nclass SshUserauthSuccessMessage(SshPacket):\n def __init__(self, buf = None):\n super().__init__(SSH_MSG_USERAUTH_SUCCESS, buf)\n\n def parse(self):\n super().parse()\n\n def encode(self):\n nbuf = super().encode()\n return nbuf\n\nclass SshUserauthPkOkMessage(SshPacket):\n def __init__(self, buf = None):\n super().__init__(SSH_MSG_USERAUTH_PK_OK, buf)\n\n def parse(self):\n super().parse()\n\n i = 1\n l, self.algorithm_name = sshtype.parseString(self.buf[i:])\n i += l\n l, self.public_key = sshtype.parseBinary(self.buf[i:])\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += sshtype.encodeString(self.algorithm_name)\n nbuf += sshtype.encodeBinary(self.public_key)\n\n return nbuf\n\nclass SshChannelOpenMessage(SshPacket):\n def __init__(self, buf = None):\n self.data_packet = None\n\n super().__init__(SSH_MSG_CHANNEL_OPEN, buf)\n\n def parse(self):\n super().parse()\n\n i = 1\n l, self.channel_type = sshtype.parseString(self.buf[i:])\n i += l\n self.sender_channel = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n self.initial_window_size = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n self.maximum_packet_size = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n\n if i < len(self.buf):\n self.data_packet = self.buf[i:]\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += sshtype.encodeString(self.channel_type)\n nbuf += struct.pack(\">L\", self.sender_channel)\n nbuf += struct.pack(\">L\", self.initial_window_size)\n nbuf += struct.pack(\">L\", self.maximum_packet_size)\n\n if self.data_packet:\n nbuf += self.data_packet\n\n return nbuf\n\nclass SshChannelOpenConfirmationMessage(SshPacket):\n def __init__(self, buf = None):\n super().__init__(SSH_MSG_CHANNEL_OPEN_CONFIRMATION, buf)\n\n def parse(self):\n super().parse()\n\n i = 1\n self.recipient_channel = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n self.sender_channel = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n self.initial_window_size = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n self.maximum_packet_size = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += struct.pack(\">L\", self.recipient_channel)\n nbuf += struct.pack(\">L\", self.sender_channel)\n nbuf += struct.pack(\">L\", self.initial_window_size)\n nbuf += struct.pack(\">L\", self.maximum_packet_size)\n\n return nbuf\n\nclass SshChannelOpenFailureMessage(SshPacket):\n def __init__(self, buf = None):\n super().__init__(SSH_MSG_CHANNEL_OPEN_FAILURE, buf)\n\n def parse(self):\n super().parse()\n\n i = 1\n self.recipient_channel = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n self.reason_code = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n l, self.description = sshtype.parseString(self.buf[i:])\n i += l\n l, self.language_tag = sshtype.parseString(self.buf[i:])\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += struct.pack(\">L\", self.recipient_channel)\n nbuf += struct.pack(\">L\", self.reason_code)\n nbuf += sshtype.encodeString(self.description)\n nbuf += sshtype.encodeString(self.language_tag)\n\n return nbuf\n\nclass SshChannelCloseMessage(SshPacket):\n def __init__(self, buf = None):\n self.recipient_channel = None\n self.implicit_channel = False\n\n super().__init__(SSH_MSG_CHANNEL_CLOSE, buf)\n\n def parse(self):\n super().parse()\n\n i = 1\n self.recipient_channel = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n if i < len(self.buf):\n self.implicit_channel = struct.unpack(\"?\", 
self.buf[i:i+1])[0]\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += struct.pack(\">L\", self.recipient_channel)\n if self.implicit_channel:\n nbuf += struct.pack(\"?\", self.implicit_channel)\n\n return nbuf\n\nclass SshChannelDataMessage(SshPacket):\n def __init__(self, buf=None, offset=0):\n self.recipient_channel = None\n self.data = None\n\n super().__init__(SSH_MSG_CHANNEL_DATA, buf, offset)\n\n def parse(self):\n i = super().parse()\n\n self.recipient_channel = struct.unpack_from(\">L\", self.buf, i)[0]\n i += 4\n self.data = self.buf[i:]\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += struct.pack(\">L\", self.recipient_channel)\n if self.data:\n # Allow data to be stored separately.\n nbuf += self.data\n\n return nbuf\n\nclass SshChannelExtendedDataMessage(SshPacket):\n def __init__(self, buf=None, offset=0):\n self.recipient_channel = None\n self.data_type_code = None\n self.data_offset = None\n self.data = None\n\n super().__init__(SSH_MSG_CHANNEL_EXTENDED_DATA, buf, offset)\n\n def parse(self):\n super().parse()\n\n i = 1\n self.recipient_channel = struct.unpack_from(\">L\", self.buf, i)[0]\n i += 4\n self.data_type_code = struct.unpack_from(\">L\", self.buf, i)[0]\n i += 4\n self.data_offset = i\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += struct.pack(\">L\", self.recipient_channel)\n nbuf += struct.pack(\">L\", self.data_type_code)\n self.data_offset = len(nbuf)\n if self.data:\n # Allow data to be stored separately.\n nbuf += self.data\n\n return nbuf\n\nclass SshChannelRequest(SshPacket):\n def __init__(self, buf=None, offset=0):\n self.recipient_channel = None\n self.request_type = None\n self.want_reply = False\n self.payload = None\n\n super().__init__(SSH_MSG_CHANNEL_REQUEST, buf, offset)\n\n def parse(self):\n i = super().parse()\n\n self.recipient_channel = struct.unpack(\">L\", self.buf[i:i+4])[0]\n i += 4\n l, self.request_type = sshtype.parseString(self.buf[i:])\n i += l\n self.want_reply = struct.unpack(\"?\", self.buf[i:i+1])[0]\n i += 1\n\n if i == len(self.buf):\n return\n self.payload = self.buf[i:]\n\n def encode(self):\n nbuf = super().encode()\n\n nbuf += struct.pack(\">L\", self.recipient_channel)\n nbuf += sshtype.encodeString(self.request_type)\n nbuf += struct.pack(\"?\", self.want_reply)\n if self.payload:\n nbuf += self.payload\n\n return nbuf\n\nclass SshChannelImplicitWrapper(SshPacket):\n data_offset = 1\n\n def __init__(self, buf=None, offset=0):\n super().__init__(SSH_MSG_CHANNEL_IMPLICIT_WRAPPER, buf, offset)\n","repo_name":"bitcoinembassy/morphis","sub_path":"packet.py","file_name":"packet.py","file_ext":"py","file_size_in_byte":16062,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"12077664611","text":"from typing import Dict, List, Optional\n\nimport aiohttp\n\n\nclass ApiHttpRequest:\n \"\"\"Класс для работы с запросами к API.\"\"\"\n\n def __init__(self, session: aiohttp.ClientSession, url: str):\n self.session = session\n self.url = url\n\n async def get_token(self, username: str, password: str) -> str:\n \"\"\"Возвращает токен для доступа к эндпоинтам API.\"\"\"\n\n async with self.session.post(\n url=self.url,\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n data={'username': username, 'password': password}\n ) as resp:\n return await resp.json()\n\n async def get_obj_list(\n self,\n token: str,\n params: Optional[Dict[str, str]] = None\n ) -> List[Optional[Dict[str, str]]]:\n \"\"\"Возвращает список данных, полученных через API.\"\"\"\n\n async 
with self.session.get(\n url=self.url,\n headers={'Authorization': f'Bearer {token}'},\n params=params\n ) as resp:\n return await resp.json()\n\n async def get_obj_from_id(\n self,\n token: str,\n id: int\n ):\n \"\"\"Возвращает единичный объект данных, полученных через API.\"\"\"\n\n url = '{0}{1}/'.format(self.url, id)\n async with self.session.get(\n url=url,\n headers={'Authorization': f'Bearer {token}'}\n ) as resp:\n return await resp.json()\n\n async def get_obj_count(\n self,\n token: str,\n ):\n \"\"\"Возвращает количество объектов в выбранной таблице базы данных.\"\"\"\n\n url = '{0}count/'.format(self.url)\n async with self.session.get(\n url=url,\n headers={'Authorization': f'Bearer {token}'}\n ) as resp:\n return await resp.json()\n\n async def get_users(\n self,\n token: str,\n params: Optional[Dict[str, str]] = None\n ) -> List[Optional[Dict[str, str]]]:\n \"\"\"Возвращает список пользователей, полученных через API.\"\"\"\n\n return await self.get_obj_list(token, params)\n\n async def get_or_create_user(\n self,\n token: str,\n username: str,\n telegram_id: str\n ):\n \"\"\"Возвращает пользователя, либо создает нового в базе данных.\"\"\"\n\n params = {'tgid': telegram_id}\n user = await self.get_users(token, params)\n if not user:\n async with self.session.post(\n url=self.url,\n headers={'Authorization': f'Bearer {token}'},\n json={'username': username, 'telegram_id': telegram_id}\n ) as resp:\n return await resp.json()\n return user\n\n async def get_user_id(\n self,\n token: str,\n tg_id: Dict[str, int]\n ) -> int:\n \"\"\"Функция получения пользователя по id.\"\"\"\n\n user = await self.get_users(\n token=token,\n params=tg_id\n )\n try:\n return user[0]['id']\n except (IndexError, KeyError):\n return 'Error. This user not found'\n\n async def create_order(\n self,\n token: str,\n title: str,\n description: str,\n user_id: Dict[str, int]\n ):\n \"\"\"Функция создания заказа.\"\"\"\n\n async with self.session.post(\n url=self.url,\n headers={'Authorization': f'Bearer {token}'},\n json={\n 'title': title,\n 'description': description,\n 'user_id': int(user_id)\n }\n ) as resp:\n return await resp.json()\n\n async def get_orders(\n self,\n token: str,\n params: Optional[Dict[str, str]] = None\n ) -> List[Optional[Dict[str, str]]]:\n \"\"\"Возвращает список заказов, полученных через API.\"\"\"\n\n return await self.get_obj_list(token, params)\n\n async def get_user_from_id(\n self,\n token: str,\n id: int\n ):\n \"\"\"Получение пользователя по id.\"\"\"\n\n return await self.get_obj_from_id(token, id)\n\n async def get_order_from_id(\n self,\n token: str,\n id: int\n ):\n \"\"\"Получение заказа по id.\"\"\"\n\n return await self.get_obj_from_id(token, id)\n\n async def get_user_count(\n self,\n token: str\n ):\n \"\"\"Возвращает количество пользователей в базе данных.\"\"\"\n\n return await self.get_obj_count(token)\n\n async def get_order_count(\n self,\n token: str\n ):\n \"\"\"Возвращает количество заказов в базе данных.\"\"\"\n\n return await self.get_obj_count(token)\n","repo_name":"Syzhet/syzhet_bot","sub_path":"SyzhetBot/misc/http_request.py","file_name":"http_request.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9521981608","text":"import sys\nsys.path.append(\"/Users/josephkim/Desktop/bitcoin_trading_back\")\n\nfrom fastapi import APIRouter\nfrom .parameter import *\nfrom .tradeFn import TradeFn\nfrom lib import insertLog\n\ntradeRouter = APIRouter(\n 
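# Register every trading endpoint below under a common /trade URL prefix.\n 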
prefix=\"/trade\",\n tags=[\"trade\"]\n)\n\ntrade = TradeFn()\n\n@tradeRouter.post('/insertTradingOption')\nasync def OptionUsed(item: tradingOption):\n try:\n response = await trade.insertTradingOPtion(item)\n insertLog.log(\"매매 옵션 등록 기능 사용\")\n return response\n except:\n insertLog.log(\"매매 옵션 등록 기능 사용 실패\")\n return 444\n\n@tradeRouter.get('/tradingOptionList')\nasync def getOptionList():\n try:\n response = await trade.tradingOptionList()\n insertLog.log(\"매매 옵션 조회 기능 사용\")\n return response\n except:\n insertLog.log(\"매매 옵션 조회 기능 사용 실패\")\n return 444\n\n@tradeRouter.post('/tradingOptionDetail')\nasync def selectOptionDetail(item: getTradingOptionDetail):\n try:\n response = await trade.tradingOptionDetail(item)\n insertLog.log(\"매매 옵션 상세 조회 기능 사용\")\n return response\n except:\n insertLog.log(\"매매 옵션 상세 조회 기능 사용 실패\")\n return 444\n\n@tradeRouter.post('/updateTradingOption')\nasync def UpdateOption(item: tradingOption):\n try:\n response = await trade.updateTradingOption(item)\n insertLog.log(\"매매 옵션 수정 기능 사용\")\n return response\n except:\n insertLog.log(\"매매 옵션 수정 기능 사용 실패\")\n return 444\n\n@tradeRouter.post('/deleteTradingOption')\nasync def OptionDelete(item: deleteTradingOption):\n try:\n response = await trade.deleteTradingOption(item)\n insertLog.log(\"매매 옵션 삭제 기능 사용\")\n return response\n except:\n insertLog.log(\"매매 옵션 삭제 기능 사용 실패\")\n return 444\n\n\n@tradeRouter.post('/useTradingOption')\nasync def OptionUsed(item: useTradingOption):\n try:\n response = await trade.useTradingOption(item)\n insertLog.log(\"매매 옵션 사용 등록 기능 사용\")\n return response\n except:\n insertLog.log(\"매매 옵션 사용 등록 기능 사용 실패\")\n return 444\n\n\n@tradeRouter.get(\"/orderList\")\nasync def getoderList():\n response = await trade.getATOrderList()\n return response\n\n@tradeRouter.get('/getSearchPriceList')\nasync def getSearchList():\n try:\n response = await trade.getSearchPriceList()\n insertLog.log(\"검색 종목 조회 기능 사용\")\n return response\n except:\n insertLog.log(\"검색 종목 조회 기능 사용 실패\")\n return 444\n\n@tradeRouter.get('/getNowUsedCondition')\nasync def getNowUsedCondition():\n try:\n response = await trade.getNowUseCondition()\n insertLog.log(\"현재 사용 옵션 조회 기능 사용\")\n return response\n except:\n insertLog.log(\"현재 사용 옵션 조회 기능 사용 실패\")\n return 444\n\n@tradeRouter.get('/getTradingHis')\nasync def getTradingHis():\n try:\n response = await trade.getTradingHis()\n insertLog.log(\"자동 매매 거래 내역 조회 기능 사용\")\n return response\n except:\n insertLog.log(\"자동 매매 거래 내역 조회 기능 사용 실패\")\n return 444\n\n@tradeRouter.get('/autoTradingCheck')\nasync def autoTradingCheck():\n try:\n response = await trade.nowAutoStatusCheck()\n return response\n except:\n insertLog.log(\"자동 매매 플래그 기능 사용 실패\")\n return 444\n\n@tradeRouter.post('/controlAutoTrading')\nasync def controlAutoTrading(item: controlAT):\n try:\n response = await trade.controlAutoTrading(item.flag)\n insertLog.log(\"자동 매매 컨트롤 기능 사용\")\n return response\n except:\n insertLog.log(\"자동 매매 컨트롤 기능 사용 실패\")\n return 444","repo_name":"ohsukpyo/4season_investment","sub_path":"routers/trade/tradeApi.py","file_name":"tradeApi.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37487041377","text":"class Solution:\n def twoSum(self, nums: list[int], target: int) -> list[int]:\n for i,j in enumerate(nums): # 인덱스를 찾기 위해 enumerate\n iter_list = nums.copy() # 본인을 제외하고 포함된 값을 찾기 위해 복사\n iter_list.pop(i) # 본인 제거\n check = target - j # target - 본인 값이 list에 있는가?\n if check in 
iter_list:\n first_index = i\n second_index = nums.index(check) # 찾은 경우 해당 인덱스들을 저장\n break\n else:\n continue\n if first_index == second_index: # [3,3] 6 인 경우 0,0으로 됨\n iter_list = nums.copy() # 처음것을 지우고 그 부분에 해당하는 값의 인덱스 + 1하면 두번째 숫자의 인덱스값이 됨\n iter_list.pop(first_index)\n second_index = iter_list.index(nums[first_index])+1\n answer = []\n answer.append(first_index)\n answer.append(second_index)\n return sorted(answer) # 정렬하여 제출\n\n \"\"\"\n Runtime: 1982 ms, faster than 27.96% of Python3 online submissions for Two Sum.\n Memory Usage: 14.8 MB, less than 95.53% of Python3 online submissions for Two Sum.\n \"\"\"","repo_name":"DrunkJin/CosMos","sub_path":"220808-220814/l1/DrunkJin_l1.py","file_name":"DrunkJin_l1.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"40336152364","text":"import warnings\n\nimport autograd\nimport autograd.core\nimport autograd.extend\nimport autograd.numpy as anp\nfrom neural_structural_optimization import caching\nimport numpy as np\nimport scipy.ndimage\nimport scipy.sparse\nimport scipy.sparse.linalg\ntry:\n import sksparse.cholmod\n HAS_CHOLMOD = True\nexcept ImportError:\n warnings.warn(\n 'sksparse.cholmod not installed. Falling back to SciPy/SuperLU, but '\n 'simulations will be about twice as slow.')\n HAS_CHOLMOD = False\n\n\n# internal utilities\ndef _grad_undefined(_, *args):\n raise TypeError('gradient undefined for this input argument')\n\n\ndef _zero_grad(_, *args, **kwargs):\n def jvp(grad_ans):\n return 0.0 * grad_ans\n return jvp\n\n\n# Gaussian filter\n@autograd.extend.primitive\ndef gaussian_filter(x, width):\n \"\"\"Apply gaussian blur of a given radius.\"\"\"\n return scipy.ndimage.gaussian_filter(x, width, mode='reflect')\n\n\ndef _gaussian_filter_vjp(ans, x, width):\n del ans, x # unused\n return lambda g: gaussian_filter(g, width)\nautograd.extend.defvjp(gaussian_filter, _gaussian_filter_vjp)\n\n\n# Cone filter\ndef _cone_filter_matrix(nelx, nely, radius, mask):\n x, y = np.meshgrid(np.arange(nelx), np.arange(nely), indexing='ij')\n\n rows = []\n cols = []\n values = []\n r_bound = int(np.ceil(radius))\n for dx in range(-r_bound, r_bound+1):\n for dy in range(-r_bound, r_bound+1):\n weight = np.maximum(0, radius - np.sqrt(dx**2 + dy**2))\n row = x + nelx * y\n column = x + dx + nelx * (y + dy)\n value = np.broadcast_to(weight, x.shape)\n\n # exclude cells beyond the boundary\n valid = (\n (mask > 0) &\n ((x+dx) >= 0) &\n ((x+dx) < nelx) &\n ((y+dy) >= 0) &\n ((y+dy) < nely)\n )\n rows.append(row[valid])\n cols.append(column[valid])\n values.append(value[valid])\n\n data = np.concatenate(values)\n i = np.concatenate(rows)\n j = np.concatenate(cols)\n return scipy.sparse.coo_matrix((data, (i, j)), (nelx * nely,) * 2)\n\n\n@caching.ndarray_safe_lru_cache()\ndef normalized_cone_filter_matrix(nx, ny, radius, mask):\n \"\"\"Calculate a sparse matrix appropriate for applying a cone filter.\"\"\"\n raw_filters = _cone_filter_matrix(nx, ny, radius, mask).tocsr()\n weights = 1 / raw_filters.sum(axis=0).squeeze()\n diag_weights = scipy.sparse.spdiags(weights, 0, nx*ny, nx*ny)\n return (diag_weights @ raw_filters).tocsr()\n\n\n@autograd.extend.primitive\ndef cone_filter(inputs, radius, mask=1, transpose=False):\n \"\"\"Apply a cone filter of the given radius.\"\"\"\n inputs = np.asarray(inputs)\n filters = normalized_cone_filter_matrix(\n *inputs.shape, radius=radius, mask=mask)\n if transpose:\n filters = filters.T\n outputs = filters @ 
inputs.ravel(order='F')\n return outputs.reshape(inputs.shape, order='F')\n\n\ndef _cone_filter_vjp(ans, inputs, radius, mask=1, transpose=False):\n del ans, inputs # unused\n return lambda g: cone_filter(g, radius, mask, transpose=not transpose)\nautograd.extend.defvjp(cone_filter, _cone_filter_vjp)\n\n\n## a useful utility for 1D scatter operations\ndef inverse_permutation(indices):\n inverse_perm = np.zeros(len(indices), dtype=anp.int64)\n inverse_perm[indices] = np.arange(len(indices), dtype=anp.int64)\n return inverse_perm\n\n\n# the 1D scatter operation\ndef scatter1d(nonzero_values, nonzero_indices, array_len):\n all_indices = np.arange(array_len, dtype=anp.int64)\n zero_indices = anp.setdiff1d(all_indices, nonzero_indices, assume_unique=True)\n index_map = inverse_permutation(\n anp.concatenate([nonzero_indices, zero_indices]))\n u_values = anp.concatenate([nonzero_values, anp.zeros(len(zero_indices))])\n return u_values[index_map]\n\n\n@caching.ndarray_safe_lru_cache(1)\ndef _get_solver(a_entries, a_indices, size, sym_pos):\n \"\"\"Get a solver for applying the desired matrix factorization.\"\"\"\n # A cache size of one is sufficient to avoid re-computing the factorization in\n # the backwards pass.\n a = scipy.sparse.coo_matrix((a_entries, a_indices), shape=(size,)*2).tocsc()\n if sym_pos and HAS_CHOLMOD:\n return sksparse.cholmod.cholesky(a).solve_A\n else:\n # could also use scikits.umfpack.splu\n # should be about twice as slow as the cholesky\n return scipy.sparse.linalg.splu(a).solve\n\n\n## Sparse solver\n@autograd.primitive\ndef solve_coo(a_entries, a_indices, b, sym_pos=False):\n \"\"\"Solve a sparse system of linear equations.\n\n Args:\n a_entries: numpy array with shape (num_zeros,) giving values for non-zero\n matrix entries.\n a_indices: numpy array with shape (2, num_zeros) giving x and y indices for\n non-zero matrix entries.\n b: 1d numpy array specifying the right hand side of the equation.\n sym_pos: is the matrix guaranteed to be positive-definite?\n\n Returns:\n 1d numpy array corresponding to the solution of a*x=b.\n \"\"\"\n solver = _get_solver(a_entries, a_indices, b.size, sym_pos)\n return solver(b)\n\n\n# see autograd's np.linalg.solve:\n# https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/numpy/linalg.py#L40\n\n\ndef solve_coo_adjoint(a_entries, a_indices, b, sym_pos=False):\n # NOTE: not tested on complex valued inputs.\n if sym_pos:\n return solve_coo(a_entries, a_indices, b, sym_pos)\n else:\n return solve_coo(a_entries, a_indices[::-1], b, sym_pos)\n\n\ndef grad_solve_coo_entries(ans, a_entries, a_indices, b, sym_pos=False):\n def jvp(grad_ans):\n lambda_ = solve_coo_adjoint(a_entries, a_indices, grad_ans, sym_pos)\n i, j = a_indices\n return -lambda_[i] * ans[j]\n return jvp\n\n\ndef grad_solve_coo_b(ans, a_entries, a_indices, b, sym_pos=False):\n def jvp(grad_ans):\n return solve_coo_adjoint(a_entries, a_indices, grad_ans, sym_pos)\n return jvp\n\n\nautograd.extend.defvjp(\n solve_coo, grad_solve_coo_entries, _grad_undefined, grad_solve_coo_b)\n\n\n@autograd.primitive\ndef find_root(\n f, x, lower_bound, upper_bound, tolerance=1e-12, max_iterations=64):\n # Implicitly solve f(x,y)=0 for y(x) using binary search.\n # Assumes that y is a scalar and f(x,y) is monotonic in y.\n for _ in range(max_iterations):\n y = 0.5 * (lower_bound + upper_bound)\n if upper_bound - lower_bound < tolerance:\n break\n if f(x, y) > 0:\n upper_bound = y\n else:\n lower_bound = y\n return y\n\n\ndef grad_find_root(y, f, x, 
lower_bound, upper_bound, tolerance=None):\n # This uses a special case of the adjoint gradient rule:\n # http://www.dolfin-adjoint.org/en/latest/documentation/maths/3-gradients.html#the-adjoint-approach\n def jvp(grad_y):\n g = lambda x: f(x, y)\n h = lambda y: f(x, y)\n return -autograd.grad(g)(x) / autograd.grad(h)(y) * grad_y\n return jvp\n\n\nautograd.extend.defvjp(\n find_root, _grad_undefined, grad_find_root,\n _zero_grad, _zero_grad, _zero_grad)\n","repo_name":"google-research/neural-structural-optimization","sub_path":"neural_structural_optimization/autograd_lib.py","file_name":"autograd_lib.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"21"} +{"seq_id":"12531755357","text":"import json\nfrom flask import Flask, jsonify, request\nfrom tools import cleaner_job\nfrom analyzer import WatsonAnalyzer\nfrom elastic import ElasticDocument\n\n# flask instannce\napp = Flask(\"Tone Analyzer ES\")\n\n# configurations\napp.config['MAX_CONTENT_LENGTH'] = 1600 * 1024 * 1024\n\n# -------------------------------------------- #\n# Tone Analyzer End point #\n# -------------------------------------------- #\n@app.route('/analyzer/', methods=['POST'])\ndef analyzer():\n analyzer = WatsonAnalyzer(\n version='2017-09-21', \n apikey='{api_key}')\n \n data_json = json.loads(request.get_data())\n queries = {k: cleaner_job(v) for (k, v) in data_json.items()}\n \n # depend on ibm analyzer tokenizer | prepare the format accordingly\n queries = \"\\n\".join(list(queries.values()))\n tones = analyzer.analyze(queries)\n \n return tones\n\n# -------------------------------------------- #\n# Elastic Search Queries Indexer End point #\n# -------------------------------------------- #\n@app.route('/indexer/', methods=['POST'])\ndef indexer():\n document = ElasticDocument(host='127.0.0.1', port=9200)\n analyzer = WatsonAnalyzer(\n version='2017-09-21', \n apikey='{api_key}')\n \n # de-serialize data from request\n data = json.loads(request.get_data())\n \n documents = []\n for doc, contents in data.items():\n # depend on ibm analyzer tokenizer | prepare the format accordingly\n queries = {k: cleaner_job(v['reviews.text']) for (k, v) in contents.items()}\n queries = \"\\n\".join(list(queries.values()))\n tones = analyzer.analyze(queries)\n \n # prepare the format for es indexer\n documents.append({'data': contents, 'tones': tones})\n \n result = document.create(index='reviews', doc_type='hotels', body_docs=documents)\n return result\n\nif __name__ == '__main__':\n app.run(threaded=False, port=5000)","repo_name":"ahmednabil950/flask-elasticsearch-services","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73540952693","text":"import json\nfrom difflib import SequenceMatcher\nimport traceback\n\n\n# Load python docs data\nwith open('code_data.json', 'r') as f:\n all_data = json.load(f)\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\ndef findMatch(value:str, matches:list, acc:float = 0.7):\n highest, matched = 0, None\n\n for string in matches:\n match = similar(value.lower(), string.lower())\n if (match >= acc or value.lower() in string.lower()) and match > highest:\n highest = match\n matched = string\n\n return matched\n\n\ndef findAllMatch(value:str, matches:list, acc:float = 0.7):\n matched = []\n\n for string in matches:\n if value.lower() in 
string.lower() or similar(value.lower(), string.lower()) >= acc:\n matched.append(string)\n\n return matched\n\n\ndef Find(code:str, name:str, info:str=None):\n print(name, info)\n try:\n matchedName = findMatch(name, all_data[code].keys())\n if matchedName == None: return None\n\n get_data = all_data[code][matchedName].copy()\n\n if info != None:\n info_stuff = get_data['info']\n get_data['info'] = dict()\n matchedSections = findAllMatch(info, info_stuff.keys())\n\n if matchedSections == []:\n get_data['info']['Awwww, Error 404.'] = 'See `!list {}` for sections available.'.format(matchedName)\n\n else:\n for section in matchedSections:\n get_data['info'][section] = info_stuff[section]\n \n return get_data\n\n except:\n traceback.print_stack()\n return\n\ndef getNames(code:str = None, name:str = None):\n if code == None: return ', '.join(list(all_data.keys())) + '\\n\\nRun `! ` to see! For list of topics, run `!list `.'\n \n else: \n if not code in all_data: return \"Actually... idk whats {}... See `!list` for things I know!\".format(code)\n elif name == None: return ', '.join(list(all_data[code].keys())) + '\\n\\nRun `!{0} ` to see! For what the section contains, run `!list {0}`.'.format(code)\n\n else:\n matchedName = findMatch(name, all_data[code].keys())\n if matchedName == None: return \"Actually... idk whats {}... See `!list {}` for things I know!\".format(name, code)\n return ', '.join(list(all_data[code][matchedName]['info'].keys())) + '\\n\\nRun `!{} {} [section]` to see!'.format(code, name)\n\ndef listCode():\n return list(all_data.keys())","repo_name":"benwoo1110/BenAI-bot","sub_path":"finddata.py","file_name":"finddata.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5608922082","text":"import soundfile as sf\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport configparser\nimport numpy as np\nfrom obspy import UTCDateTime, read, Trace, Stream\nfrom obspy.signal.trigger import classic_sta_lta,plot_trigger\nfrom scipy.fft import fft, ifft\nimport librosa.feature as feat\nfrom tqdm import tqdm\nfrom scipy import signal\nimport soundfile as sf\n#########################################################\n# Fonctions #\n#########################################################\n\n\ndef load_soundfile(file_path, name_file):\n\n Path = file_path+\"\\\\\"+name_file\n sig, n_sample = sf.read(Path)\n sos = signal.butter(6, [5000, 100000], 'bandpass', fs=n_sample, output='sos')\n sig = signal.sosfiltfilt(sos, sig)\n trace = Trace(sig)\n trace.stats.sampling_rate = 256000\n trace.stats.station = name_file[0:-3]\n return trace, n_sample\n\n\n\ndef make_features(sig):\n # J'initialise mes variables qui vont contenir les features\n\n STA_LTA_MAX = 0\n\n # Je réalise une fenêtre glissante\n iter = 0\n T=0.2\n fech = sig.stats.sampling_rate\n window_length = 512/fech\n step = window_length/2\n for windowed_tr in tqdm(sig.slide(window_length=window_length, step=step)):\n\n\n # Je calcule le STA/LTA pour chercher dans l'extrait audio l'impulsion qui m'interresse\n\n STA_LTA = classic_sta_lta(windowed_tr,nsta=256,nlta=512)\n if STA_LTA.max() > STA_LTA_MAX:\n\n windowed_tr_computation = windowed_tr.copy()\n STA_LTA_MAX = STA_LTA.max()\n\n\n\n\n # CALCUL DES FEATURES :\n rms = feat.rms(y=windowed_tr_computation.data, frame_length=256, hop_length=64) \n sc = feat.spectral_centroid(y=windowed_tr_computation.data, sr=fech,n_fft=256, 
hop_length=64)\n sb = feat.spectral_bandwidth(y=windowed_tr_computation.data, sr=fech,n_fft=256, hop_length=64)\n sf = feat.spectral_flatness(y=windowed_tr_computation.data, n_fft=256, hop_length=64)\n\n features = [[np.mean(rms), np.std(rms), np.min(rms), np.max(rms),\\\n np.mean(sc), np.std(sc), np.min(sc), np.max(sc),\\\n np.mean(sb), np.std(sb), np.min(sb), np.max(sb),\\\n np.mean(sf), np.std(sf), np.min(sf), np.max(sf),\\\n STA_LTA_MAX]]\n\n return features\n\n#########################################################\n# Chargement des paramètres #\n#########################################################\n\n\n#load du fichier configparser\n\nconfig = configparser.ConfigParser()\nconfig.sections()\nconfig.read(\"Config_parametre.ini\",encoding=\"utf-8\")\n\n#ajout des paramètres\n\ndir_data = config[\"Paramètres\"][\"dir_data\"]\nfile_path_data_information = config[\"Paramètres\"][\"file_path_data_information\"]\n\n\n#########################################################\n# Programme #\n#########################################################\n\n\n\n# Je charge le fichier excel_id contenant l'information des id des fichiers audio\n\ndf_train_info = pd.read_csv(file_path_data_information,header=0)\n\n# Je calcul pour chaque fichier audio les features et je les enregistres dans un fichiers np.\n\nfeatures = np.empty((1,17)) # ATTENTION IL FAUT SUPPRIMER LE TOUT PREMIER ECHANTILLONS LORS DU CHARGEMENT\n # DES DONNEES TEST A CAUSE DE CETTE LIGNE\n\nfor name_file in tqdm(df_train_info['id']):\n sig, n_sample = load_soundfile(dir_data,name_file)\n new_features = make_features(sig)\n features = np.concatenate((features,new_features), axis=0)\n\n\nnp.save('data_Test',features)\n","repo_name":"RomanBouet/projet_Biosonar","sub_path":"Supervised_classification.py","file_name":"Supervised_classification.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9984615962","text":"# ch4_2.py\nimport openpyxl \n\nfn = \"data4_2.xlsx\" # 來源活頁簿\nwb = openpyxl.load_workbook(fn)\nws = wb.active\n\nnew_wb = openpyxl.Workbook() # 建立目的的活頁簿\nnew_ws = new_wb.active\n\nfor m in range(1, ws.max_row+1):\n for n in range(65, 65+ws.max_column): # 65是A\n ch = chr(n) # 將ASCII碼值轉字元\n index = ch + str(m)\n data = ws[index].value\n new_ws[index].value = data # 寫入目的活頁簿\n\nnew_wb.save(\"out4_2.xlsx\") # 儲存結果\n\n\n\n\n\n","repo_name":"June0608/Python","sub_path":"Program Examples/ch4/ch4_2.py","file_name":"ch4_2.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43808705647","text":"\"\"\"Added Hero model\n\nRevision ID: 4bf8df862b49\nRevises: 1cd1c1517f1\nCreate Date: 2013-01-30 02:01:37.216117\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4bf8df862b49'\ndown_revision = '1cd1c1517f1'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('hero',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=250), nullable=True),\n sa.Column('proper_name', sa.String(length=250), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name'),\n sa.UniqueConstraint('proper_name')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('hero')\n ### end Alembic commands ###\n","repo_name":"bobbyrward/dota2_stats","sub_path":"alembic/versions/4bf8df862b49_added_hero_model.py","file_name":"4bf8df862b49_added_hero_model.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2165162569","text":"import os\nfrom google.cloud import pubsub_v1\nfrom concurrent.futures import TimeoutError\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"custom-hold-311317-fcf198d7be95.json\"\n\nPROJECT_ID = 'custom-hold-311317'\nSUBSCRIPTION_ID = 'fire-preds-sub'\nMODEL_NAME = 'FireSage'\nVERSION_NAME = 'FireSage_v1'\ntimeout = 700.0\n\n\nsubscriber = pubsub_v1.SubscriberClient()\nsubscription_path = subscriber.subscription_path(PROJECT_ID, SUBSCRIPTION_ID)\n\ndef callback(message):\n print(f\"Recieved message: {message}\")\n message.ack()\n\nstreaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)\nprint(f\"listening for predictions on {subscription_path}... \\n\")\n\nwith subscriber:\n try: \n streaming_pull_future.result(timeout=timeout)\n except TimeoutError:\n streaming_pull_future.cancel()\n","repo_name":"danny-ell77/FireSage","sub_path":"subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3837877284","text":"import requests\nfrom stem import Signal\nfrom stem.control import Controller\nfrom fake_useragent import UserAgent\nimport random, time\nimport urllib\nimport json\nimport logging.config\nimport sys\n\n# signal TOR for a new connection \n# https://stackoverflow.com/questions/30286293/make-requests-using-python-over-tor\n# https://jarroba.com/anonymous-scraping-by-tor-network/\n# https://gist.github.com/KhepryQuixote/46cf4f3b999d7f658853\n\n\n# Connection manager\nclass ConnectionManager:\n\n # initial\n def __init__(self):\n # logger\n self.logger = ConnectionManager.get_logger(level = logging.DEBUG, dest = \"\", verbose = 0)\n self.logger.info(\"initial connection manager...\")\n self.headers = { 'User-Agent': UserAgent().random }\n self.proxies = {\n 'http': 'socks5://127.0.0.1:9050',\n 'https': 'socks5://127.0.0.1:9050'\n }\n # self.current_ip = self.get_request(url = \"https://ident.me\").text\n self.current_ip = self.get_request(url = \"http://api.ipify.org\").text\n self.get_connection_info(self.current_ip)\n\n # get request\n def get_request(self, url):\n rs = requests.get(url, proxies = self.proxies, headers = self.headers)\n time.sleep(4)\n return rs\n\n # get connection info\n def get_connection_info(self, ip):\n response = urllib.request.urlopen(f\"http://ipinfo.io/{ip}/json\")\n data = json.load(response)\n city = data['city']\n country = data['country']\n region = data['region']\n provider = data['org']\n self.logger.info(f\"IP Address: {ip} \\t City: {city} \\t Region: {region} \\t Country: {country} \\t Provider: {provider}\")\n\n # Renew connection\n def renew_connection(self):\n while True:\n with Controller.from_port(port = 9051) as c:\n # c.authenticate(password=\"password\")\n c.authenticate(password = \"SomeThingDunnoxD\")\n c.signal(Signal.NEWNYM)\n\n wait = random.uniform(0, 5)\n self.logger.warning(\"Wait : \" + str(wait))\n time.sleep(wait)\n\n new_ip = self.get_request(url = 'https://api.ipify.org').text\n self.get_connection_info(new_ip)\n # check ip change \n if self.current_ip != new_ip:\n break\n\n # logger\n 
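# Builds a module logger: a file handler writes full detail to proxy.log,\n # while the stdout handler level is chosen from sh_lvls by the verbose index.\n 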
@staticmethod\n def get_logger(level = logging.DEBUG, dest='', verbose = 0):\n \"\"\"Returns a logger.\"\"\"\n logger = logging.getLogger(__name__)\n\n dest += '/' if (dest != '') and dest[-1] != '/' else ''\n fh = logging.FileHandler(dest + 'proxy.log', 'w')\n fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n fh.setLevel(level)\n logger.addHandler(fh)\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n # warning, info, error\n sh_lvls = [logging.ERROR, logging.WARNING, logging.INFO]\n sh.setLevel(sh_lvls[verbose])\n logger.addHandler(sh)\n\n logger.setLevel(level)\n\n return logger\n\n\n\n\n \n\n\n\n\n","repo_name":"smb-h/instabot","sub_path":"library/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1659940216","text":"\"\"\"\nTitle : 휴먼 파이프라인\nLink : https://www.acmicpc.net/problem/22981\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\nMIIS = lambda: map(int, input().split())\n\nn, k = MIIS()\nworkers = sorted(list(MIIS()), key=lambda x:-x)\n\nfirst_team = 1 * workers[0]\nsecond_team = (n - 1) * workers[-1]\n\nfor i in range(1, n - 1):\n f = (i + 1) * workers[i]\n s = (n - i - 1) * workers[-1]\n if f + s > first_team + second_team:\n first_team = f\n second_team = s\n\nteam_power = first_team + second_team\nif k % team_power:\n print(k // team_power + 1)\nelse:\n print(k // team_power)\n\n'''\nCounter Example\n4 16\n6 5 4 3\nans : 1\n'''","repo_name":"mintropy/algorithm_pulzo","sub_path":"이영준/2021/09/0930/22981.py","file_name":"22981.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"8311196550","text":"# Run Loan Book Simulation python code\nimport math\nimport numpy as np\nimport pandas as pd\n\n\nclass LoanBookSimulation:\n def __init__(self, max_months, initial_savings, regulatory_capital_ratio):\n self.max_month = max_months\n self.initial_savings = initial_savings\n self.current_savings = initial_savings\n self.regulatory_capital_ratio = regulatory_capital_ratio\n self.loan_book = []\n self.customer_payments = []\n self.deposit_account = []\n self.prepayed_loans = 0\n\n # Parameters\n # max_month = 60\n # months = range(max_month)\n # initial_savings = 10 * 1e6 # £1 million\n # current_savings = initial_savings\n # prepayed_loans = 0\n # regulatory_capital_ratio = 0.5\n # loan_book = []\n # customer_payments = []\n # deposit_account = []\n\n # Market rates simulation using CIR process\n def CIR_process(self, r0, kappa, theta, sigma, dt, n):\n r = np.zeros(n)\n r[0] = r0\n for i in range(1, n):\n gamma = sigma * np.sqrt(math.fabs(r[i - 1]))\n z = np.random.randn()\n r[i] = np.abs(r[i - 1] + kappa * (theta - r[i - 1]) * dt + gamma * np.sqrt(dt) * z)\n return r\n\n # Customer arrival simulation as a Poisson process\n def simulate_customers(self, lam, duration):\n t = 0\n customers = []\n while t < duration:\n interarrival_time = np.random.exponential(1 / lam)\n t += interarrival_time\n if t < duration:\n customers.append(t)\n return customers\n\n # Generate loan parameters for a customer\n def generate_loan_parameters(self):\n credit_quality = np.random.uniform(0, 1)\n loan_amount = np.random.randint(1000, 25001)\n loan_term = np.random.randint(12, self.max_month + 1)\n fixed_rate = np.random.uniform(0.02, 0.08)\n return credit_quality, 
loan_amount, loan_term, fixed_rate\n\n # Simulate loan book over 60 months\n def simulate_loan_book(self):\n cir_params = {'r0': 0.03, 'kappa': 1, 'theta': 0.045, 'sigma': 0.1}\n market_rates = self.CIR_process(cir_params['r0'], cir_params['kappa'], cir_params['theta'], cir_params['sigma'],\n 1 / 12, self.max_month)\n discount_factors = np.cumprod(1 / (1 + market_rates / 12))\n customer_arrivals = self.simulate_customers(15, self.max_month) # Assuming average of 10 customers per month\n\n # Now perform the simulation:\n for month in range(self.max_month):\n # Remove customers who have completed their repayments to term\n deposit_account_maturities = [loan['loan_amount'] for loan in self.loan_book if loan['loan_term'] > month]\n self.current_savings += sum(deposit_account_maturities)\n self.loan_book = [loan for loan in self.loan_book if\n (loan['loan_term'] > month and self.deposit_account.append)]\n # Add new customers\n for arrival in customer_arrivals:\n if arrival <= month:\n credit_quality, loan_amount, loan_term, fixed_rate = self.generate_loan_parameters()\n loan = {'credit_quality': credit_quality, 'loan_amount': loan_amount, 'loan_term': loan_term,\n 'fixed_rate': fixed_rate}\n if loan['loan_amount'] <= self.current_savings:\n self.loan_book.append(loan)\n self.current_savings -= loan['loan_amount']\n # Calculate monthly customer payments and remove customers who prepay\n for loan in self.loan_book:\n market_rate = market_rates[month]\n if loan['fixed_rate'] < market_rate:\n monthly_payment = loan['loan_amount'] * loan['fixed_rate'] / 12\n else:\n self.current_savings += loan['loan_amount']\n self.prepayed_loans += loan['loan_amount']\n self.loan_book.remove(loan)\n continue\n loan['loan_amount'] -= monthly_payment\n if loan['loan_amount'] <= 0:\n self.loan_book.remove(loan)\n self.customer_payments.append(monthly_payment)\n # Save current_savings\n self.deposit_account.append(discount_factors[month] * self.current_savings)\n\n def loan_book_segmentation(self):\n credit_quality = np.zeros(len(self.loan_book))\n loan_term = np.zeros(len(self.loan_book))\n loan_amount = np.zeros(len(self.loan_book))\n fixed_rate = np.zeros(len(self.loan_book))\n\n # Extract the values from the loan book\n for i, loan in enumerate(self.loan_book):\n credit_quality[i] = loan['credit_quality']\n loan_term[i] = loan['loan_term']\n loan_amount[i] = loan['loan_amount']\n fixed_rate[i] = loan['fixed_rate']\n\n # Define cut-off values for credit quality, loan term, loan amount, and fixed rate\n credit_quality_cutoffs = np.percentile(credit_quality, [33, 66]).round(4)\n loan_term_cutoffs = np.percentile(loan_term, [33, 66]).round(0)\n loan_amount_cutoffs = np.percentile(loan_amount, [33, 66]).round(-2) # nearest 100\n fixed_rate_cutoffs = np.percentile(fixed_rate, [33, 66]).round(4)\n\n # Segment the personal loans book\n credit_quality_segment = np.digitize(credit_quality, credit_quality_cutoffs)\n loan_term_segment = np.digitize(loan_term, loan_term_cutoffs)\n loan_amount_segment = np.digitize(loan_amount, loan_amount_cutoffs)\n fixed_rate_segment = np.digitize(fixed_rate, fixed_rate_cutoffs)\n\n # Make dataframe of book with segments\n loan_book_segmentation = pd.DataFrame({\n 'credit_quality': credit_quality.round(4),\n 'credit_quality_segment': credit_quality_segment,\n 'loan_term': loan_term.round(0),\n 'loan_term_segment': loan_term_segment,\n 'loan_amount': loan_amount.round(-2),\n 'loan_amount_segment': loan_amount_segment,\n 'fixed_rate': fixed_rate.round(4),\n 'fixed_rate_segment': 
fixed_rate_segment})\n\n pd.set_option('display.max_columns', len(loan_book_segmentation.columns))\n return loan_book_segmentation\n\n def split_loan_into_tranches(self, loans):\n loan_tranches = {}\n types = [0, 1, 2]\n dict_map = {'0': 'L', '1': 'M', '2': 'H'}\n for amount_segment in types:\n for term_segment in types:\n for fixed_rate_segment in types:\n seg_key = [str(amount_segment), str(term_segment), str(fixed_rate_segment)]\n segment = ''.join([dict_map[digit] for digit in seg_key])\n loan_tranches[segment] = loans[(loans['loan_amount_segment'] == amount_segment) & (loans[\n 'loan_term_segment'] == term_segment) &\n (loans['fixed_rate_segment'] == fixed_rate_segment)]\n\n return loan_tranches\n","repo_name":"kasirajr/tesco-industry-project","sub_path":"loan_simulation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38953578269","text":"class Node: # 노드 안에는 데이터(data)와 다음 칸을 나타내는 포인터(next)가 있어야 함.\r\n def __init__(self, data): # 매개변수 data에 입력받아\r\n self.data = data # self.data에 저장\r\n self.next = None\r\n\r\n\r\nnode = Node(3)\r\nfirst_node = Node(4)\r\nnode.next = first_node\r\n\r\nclass LinkedList: # 링크드리스트에는 head node만 가지고 있으면 된다.\r\n def __init__(self, data):\r\n self.head = Node(data) # 해당 데이터를 들고 있는 노드를 생성해서 넣어 준다.\r\n\r\n def append(self, data):\r\n if self.head is None:\r\n self.head = Node(data)\r\n return\r\n\r\n cur = self.head\r\n while cur.next is not None:\r\n cur = cur.next\r\n cur.next = Node(data)\r\n\r\n def print_all(self):\r\n print('hihihi')\r\n cur = self.head\r\n while cur is not None:\r\n print(cur.data)\r\n cur = cur.next\r\n# [3] -> [4] -> [5] -> [6] -> None\r\n\r\nlinked_list = LinkedList(3)\r\n\r\nlinked_list.append(4)\r\nlinked_list.append(5)\r\nlinked_list.print_all()","repo_name":"kgs1209/algo","sub_path":"week_2/01_print_all_linked_list.py","file_name":"01_print_all_linked_list.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73448509492","text":"\"\"\"\nThis is code that handles step 2 of the authentication process.\n\nSee the test for this file in tests/unit for more info.\n\"\"\"\nfrom pynamodb.exceptions import TransactWriteError\nfrom tripit.auth.models import TripitRequestToken, TripitAccessToken\nfrom tripit.core.v1.oauth import request_access_token\nfrom tripit.logging import logger\n\n\ndef handle_callback(request_token):\n \"\"\"\n Handles TripIt callbacks and persists access tokens with access keys for\n future use.\n \"\"\"\n access_key = get_access_key_from_request_token(request_token)\n if access_key is None:\n logger.error(\"This token hasn't been mapped yet: %s\", request_token)\n return False\n request_token_secret = get_token_secret_from_request_token(request_token)\n if request_token_secret is None:\n logger.error(\"BUG: No token secret mapped to request token from step 1: %s\", access_key)\n return False\n access_token_data = request_access_token(request_token, request_token_secret)\n if access_token_data is None:\n logger.error(\"Failed to obtain an access token from request token %s\", request_token)\n return False\n try:\n TripitAccessToken.insert(\n access_key=access_key,\n token=access_token_data[\"token\"],\n token_secret=access_token_data[\"token_secret\"],\n )\n return True\n except TransactWriteError:\n logger.error(\"Failed to write new token; see logs above\")\n return 
False\n\n\ndef get_token_secret_from_request_token(token):\n \"\"\"\n Retrieves request token secrets from request_tokens\n \"\"\"\n try:\n return TripitRequestToken.as_dict(token)[\"token_secret\"]\n except TripitRequestToken.DoesNotExist:\n return None\n\n\ndef get_access_key_from_request_token(token):\n \"\"\"\n Retrieves access key from request_tokens\n \"\"\"\n try:\n return TripitRequestToken.as_dict(token)[\"access_key\"]\n except TripitRequestToken.DoesNotExist:\n return None\n","repo_name":"carlosonunez/tripit-apis","sub_path":"tripit/auth/step_2.py","file_name":"step_2.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11651878051","text":"import torch\nimport torch.nn as nn\nfrom torchmetrics import F1Score\n\nfrom .build_loss import register_loss\n\n\n@register_loss\nclass F1(nn.Module):\n def __init__(self, pred_name, target_name, threshold,\n display_name=None, display_tqdm=False):\n super(F1, self).__init__()\n self.pred_name = pred_name\n self.target_name = target_name\n self.loss_name = display_name\n\n self.metric = F1Score(\n threshold=threshold, num_classes=1\n )\n\n self.preds = []\n self.targets = []\n \n def forward(self, container):\n y_pred = container[self.pred_name].float()\n y_pred = torch.sigmoid(y_pred)\n y_true = container[self.target_name].float()\n\n self.preds.append(y_pred.detach().cpu())\n self.targets.append(y_true.detach().cpu())\n \n def calc_metric(self):\n f1_score = self.metric(\n torch.cat(self.preds),\n torch.cat(self.targets).to(torch.int)\n )\n return {\n 'F1_score': f1_score,\n }\n \n def reset(self):\n self.preds = []\n self.targets = []","repo_name":"achilleess/inca_digital_challenge","sub_path":"bot_detector/losses/f1_score.py","file_name":"f1_score.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22611734201","text":"from pagermaid import bot\nfrom pagermaid.listener import listener\nfrom pagermaid.utils import alias_command, attach_log, lang\n\n\n@listener(\n is_plugin=True,\n outgoing=True,\n command=alias_command(\"parse\"),\n description=\"Parse Mode Test\",\n parameters=\"() \",\n)\nasync def parse(context):\n reply = await context.get_reply_message()\n mode = context.arguments.replace(\" \", \"\")\n if not reply:\n return await context.edit(\"Please reply to a message.\\n请回复一条消息。\")\n if not mode or mode == \"h\":\n bot.parse_mode = \"html\"\n edit_mode = None\n elif mode == \"m\":\n bot.parse_mode = \"markdown\"\n edit_mode = None\n elif mode == \"n\":\n bot.parse_mode = None\n edit_mode = None\n elif mode == \"nh\":\n bot.parse_mode = None\n edit_mode = \"html\"\n elif mode == \"nm\":\n bot.parse_mode = None\n edit_mode = \"markdown\"\n else:\n return await context.edit(lang(\"arg_error\"))\n\n if len(reply.text) > 4096:\n return await attach_log(reply.text, context.chat_id, \"parse.txt\", context.id)\n await bot.edit_message(\n context.chat_id, context.id, reply.text, parse_mode=edit_mode\n )\n bot.parse_mode = \"markdown\"\n","repo_name":"Oreomeow/PagerMaid_Plugins","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72808785334","text":"from __future__ import annotations\nfrom contextlib import contextmanager\nfrom functools import partial\n\nfrom redio import conv\nfrom 
redio.commands import CommandBase\nfrom redio.conn import ConnectInfo\nfrom redio.exc import ProtocolError, RedisError\nfrom redio.protocol import Protocol\nfrom redio.pubsub import PubSub\n\nclass Redis:\n \"\"\"Redis connection pool.\"\"\"\n def __init__(self, url=\"redis://localhost/\", *, ssl_context=None, pool_max=100):\n self.conninfo = ConnectInfo.from_url(\n url,\n ssl_context=ssl_context,\n )\n self.pool_max = pool_max\n self.pool = []\n\n def __call__(self) -> DB:\n \"\"\"Get a Redis database connection.\"\"\"\n return DB(self)\n\n def pubsub(self, *channels) -> PubSub:\n \"\"\"Create a publish/subscribe receiver.\"\"\"\n return PubSub(self._borrow_connection(), *channels)\n\n def _borrow_connection(self) -> Protocol:\n return self.pool.pop() if self.pool else Protocol(self.conninfo)\n\n def _restore_connection(self, connection: Protocol):\n if len(self.pool) < self.pool_max:\n self.pool += connection,\n\n\nclass DB(CommandBase, conv.ByteDecoder):\n \"\"\"Redis database connection (high level API).\"\"\"\n def __init__(self, redis: Redis):\n super().__init__()\n self.redis = redis\n self.protocol = redis._borrow_connection()\n self.commands = []\n\n def __del__(self):\n \"\"\"Restore still usable connection to pool on garbage collect. We rely\n partially on CPython's reference counting but also note that it is not\n crucial for connections to be returned immediately.\"\"\"\n if self.redis and not self.protocol.closed:\n self.redis._restore_connection(self.protocol)\n\n @property\n def prevent_pooling(self):\n \"\"\"Prevent this connection being returned to connection pool.\"\"\"\n self.redis = None\n return self\n\n def __await__(self):\n \"\"\"Execute any pending commands and return their results.\n\n Generally there is one response per each command but some commands may\n not return anything.\n\n RedisError objects may be returned instead of being raised because the\n database does not abort or rollback anything, and thus all responses\n should be returned.\n\n Two or more responses are returned as a list.\"\"\"\n return self._run().__await__()\n\n async def _run(self):\n \"\"\"Execute queued commands, equivalent to await self.\"\"\"\n if self.protocol.closed:\n await self.protocol.connect()\n self._transaction_server = None\n try:\n if self.commands:\n return await self._execute()\n except:\n # Any error and we assume that the connection is in invalid state.\n self.prevent_pooling\n await self.protocol.aclose()\n self.protocol = None\n raise\n\n async def _execute(self):\n \"\"\"Execute queued commands without error handling.\"\"\"\n commands = []\n handlers = []\n for handler, cmd in self.commands:\n handlers.append(handler)\n commands.append([conv.encode(a) for a in cmd])\n self.commands = []\n if self._transaction_state is not None:\n self.prevent_pooling # TODO: There are cases where we can resume pooling\n res = await self.protocol.run(commands)\n ret = _handle_response(handlers, res)\n ret = self._decode(ret)\n self.bytedecoder(None)\n return ret if len(ret) != 1 else ret[0]\n\n def _command(self, *cmd, handler=None):\n if isinstance(self._transaction_state, list):\n self.commands.append((\"QUEUED\", cmd))\n self._transaction_state.append(handler)\n else:\n self.commands.append((handler, cmd))\n return self\n\ndef _handle_response(handlers, res):\n \"\"\"Run handlers, check for errors and unpack transaction results.\"\"\"\n ret = []\n if len(handlers) != len(res):\n raise Exception(f\"BUG in redio: lists are different length:\\n handlers={handlers}\\n res={res}\\n\")\n 
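# Post-process each raw reply with its paired handler: None keeps the reply\n # as-is, a str asserts an exact status, a list unwraps an EXEC (transaction)\n # result, an Exception is raised, and any other callable maps the value.\n 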
for h, r in zip(handlers, res):\n if h is None:\n ret += r,\n continue\n if isinstance(h, str):\n if r != h:\n raise ProtocolError(f\"Expected {h}, got {r}\")\n continue\n if isinstance(h, list):\n # EXEC command (transaction result)\n if r:\n r = _handle_response(h, r)\n ret.append(r or r is not False) # non-empty list, True or False\n continue\n if isinstance(h, Exception):\n raise h\n ret += h(r),\n return ret\n","repo_name":"Tronic/redio","sub_path":"redio/highlevel.py","file_name":"highlevel.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"72812665014","text":"import numpy as np\nimport starspot as ss\nfrom starspot import sigma_clipping\nfrom astropy.io import fits\nimport os\nimport matplotlib.pyplot as plt\n\ntics_list = np.load('data/all_dled_tics.npy') # len= 24084 VS cleaned_LightCurves below len = 24090\n# tics_list = [25063396,990000,149603524,900000] #good,bad,good,bad tics ##examples to test code\n\n\n#to store calculated stats\ntic_list = [] #to verify picking order stays same\nerr_stmts=[]\npdms_err =[] \npdms =[]\n\n#opening cleaned lcfiles\nfor count,tic in enumerate(tics_list):\n\tprint('starting work on tic:',tic)\n\tprint('starting work on tic:',tic)\n\tprint('starting work on tic:',tic)\n\tprint('starting work on tic:',tic)\n\tprint('starting work on tic:',tic)\n\tprint('\t\t\tstarting work on tic:',tic, ' and with count: ',count)\n\tprint('\t\t\tstarting work on tic:',tic, ' and with count: ',count)\n\ttry: \n\t\tlc = fits.open('data/SECONDRUN/cleaned_LightCurves/{}/lc.fits'.format(tic)) #lk cant find flux attribute\n\t\tdata = lc[1].data #all the data\n\t\tflux1 = data['FLUX']\n\t\tflux_err1 = data['FLUX_ERR']\n\t\ttime1 = data['TIME']\n\t\tcadence = data['CADENCENO']\n\t\tquality = data['QUALITY']\n#extra cleaning- ruths tutorial\n # Calculate the median so that we can median-normalize.\n\t\tmed = np.median(flux1)\n # Do an initial sigma clip to remove big outliers.\n\t\tm = sigma_clipping.sigma_clip(flux1/med - 1, nsigma=5)\n\t\tx, y, yerr = time1[m], flux1[m]/med - 1, flux_err1[m]/med\n\n # Then a sigma clip using a Sav-Gol filter for smoothing\n\t\tsmooth, mask = sigma_clipping.filter_sigma_clip(x, y, window_length=199)\n\t\ttime, flux, flux_err = x[mask], y[mask], yerr[mask]\n#creating model & gathering stats\n\t\trotate = ss.RotationModel(time, flux, flux_err)\n#ls\n\t\tls_period = rotate.ls_rotation(high_pass=True) #added highpass filter\n\t\tpower = rotate.power\n\t\tfreq = rotate.freq\n\t\tfilename_LSpower = 'data/FOURTHRUN/data_arrs/{}/ls_power'.format(tic)\n\t\tfilename_LSfreq = 'data/FOURTHRUN/data_arrs/{}/ls_freq'.format(tic)\n\t\tos.makedirs(os.path.dirname(filename_LSpower), exist_ok=True)\n\t\tos.makedirs(os.path.dirname(filename_LSfreq), exist_ok=True)\n\t\tnp.save(filename_LSpower,power)\n\t\tnp.save(filename_LSfreq,freq)\n\t\tps = 1./freq\n\t\tpeaks = np.array([i for i in range(1, len(ps)-1) if power[i-1] < \\\n power[i] and power[i+1] < power[i]])\n\t\tpeak_amps_low2high = np.sort(power[peaks])\n\t\tsecond_rp = ps[power == peak_amps_low2high[-2]][0]\n\t\tthird_rp = ps[power == peak_amps_low2high[-3]][0]\n#acf\n\t\ttess_cadence = 1./24./30.\n\t\tacf_rp = rotate.acf_rotation(tess_cadence) # tess cadence equivalent to interval = 'TESS' in starspot docs\n\t\tx2 = rotate.lags\n\t\ty2 = rotate.acf\n\t\tfilename_ACF_lags = 'data/FOURTHRUN/data_arrs/{}/acf_lags'.format(tic)\n\t\tfilename_ACF_acf = 
'data/FOURTHRUN/data_arrs/{}/acf_ys'.format(tic)\n\t\tos.makedirs(os.path.dirname(filename_ACF_lags), exist_ok=True)\n\t\tos.makedirs(os.path.dirname(filename_ACF_acf), exist_ok=True)\n\t\tnp.save(filename_ACF_lags,x2)\n\t\tnp.save(filename_ACF_acf,y2)\n\t\tpeaks2 = np.array([i for i in range(1,len(y2)-1) if y2[i-1] < y2[i] and \\\n y2[i+1] < y2[i]])\n\t\txpeaks = x2[peaks2]\n\t\tif len(xpeaks) >= 2:\n\t\t\tacfrp2 = xpeaks[1]\n\t\telse:\n\t\t\tacfrp2 = acf_rp\n\t\tif len(xpeaks) >= 3:\n\t\t\tacfrp3 = xpeaks[2]\n\t\telse:\n\t\t\tacfrp3 = acf_rp\n#plots\n \n\t\tfig, axs = plt.subplots(3,1,figsize=(16,10))\n\t\tplt.subplots_adjust(hspace=0.5)\n\t\taxs[0].scatter(time1,flux1,color='k',s=.5,label='minimally cleaned')\n\t\taxs[0].plot(x, smooth+1,color='orange', label=\"Smoothed light curve\")\n\t\taxs[0].set_xlabel('Time [days]')\n\t\taxs[0].set_ylabel('Relative Flux')\n\t\taxs[0].set_title('Stitched Light Curve for TIC:{}'.format(tic),fontsize=30);\n\t\taxs[0].legend(prop={'size': 12})\n \n\t\taxs[1].plot(-np.log10(freq), power, \"k\", zorder=0)\n\t\taxs[1].axvline(np.log10(ls_period), color=\"C1\", lw=4, alpha=0.5,\n zorder=1,label=('{} days'.format(ls_period)))\n\t\taxs[1].axvline(np.log10(second_rp),lw=4,alpha=0.5,zorder=2,linestyle='--',color='cyan',label=('{} days'.format(second_rp)))\n\t\taxs[1].axvline(np.log10(third_rp),lw=4,alpha=0.5,zorder=3,linestyle=(0, (1, 10)),color='g',label=('{} days'.format(third_rp)))\n\t\taxs[1].set_xlabel(\"log10(Period [days])\")\n\t\taxs[1].set_ylabel(\"Power\");\n\t\taxs[1].set_title('Lomb-Scargle Periodogram for TIC:{}'.format(tic),fontsize=30)\n\t\taxs[1].legend(prop={'size': 15})\n\n\t\taxs[2].plot(x2,y2,color='k')\n\t\taxs[2].axvline(acf_rp,color=\"C1\",label='{}days'.format(acf_rp))\n\t\taxs[2].axvline(acfrp2,color='cyan',linestyle='--',label='{} days'.format(acfrp2))\n\t\taxs[2].axvline(acfrp3,color='green',linestyle=(0, (1, 10)),label=\"{} days\".format(acfrp3))\n\t\taxs[2].set_xlabel(\"Period [days]\")\n\t\taxs[2].set_ylabel(\"Correlation\")\n\t\taxs[2].set_xlim(-0.5,max(x2))#acfrp3+5)\n\t\taxs[2].set_title('ACF for TIC:{}'.format(tic),fontsize=30)\n\t\taxs[2].legend(prop={'size': 15})\n \n\t\tplt.tight_layout()\n\t\tfilename = 'data/FOURTHRUN/plots/{}/ls_acf_plots'.format(tic)\n\t\tos.makedirs(os.path.dirname(filename), exist_ok=True)\n\t\tplt.savefig(filename)\n\t\tplt.close()\n \n#pdm\n\t\tperiod_grid = np.linspace(.1,15,1000) #max period is 15 days to match likely ls periods and save time\n\t\tpdm_rp, pdm_err = rotate.pdm_rotation(period_grid, pdm_nbins = 10) #default 10..will slow down code to increase but may want to in future\n\t\tprint('pdm:',pdm_rp)\n\t\ttic_list.append(tic)\n\t\tnp.save('data/FOURTHRUN/tic_order_pdm',tic_list)\n\t\tpdms.append(pdm_rp)\n\t\tnp.save('data/FOURTHRUN/pdm',pdms)\n\t\tpdms_err.append(pdm_err)\n\t\tnp.save('data/FOURTHRUN/pdm_err',pdms_err)\n#pdm plots\n\t\trotate.pdm_plot()\n\t\tplt.title('TIC:{} with period:{} days'.format(tic,pdm_rp),fontsize=20,loc='left')\n\t\tfilename2 = 'data/FOURTHRUN/plots/{}/pdm_plots'.format(tic)\n\t\tos.makedirs(os.path.dirname(filename2), exist_ok=True)\n\t\tplt.savefig(filename2)\n\t\tplt.close()\n\t\tprint('ENDING TIC:',tic)\n\texcept Exception as e:\n\t\tstmt = 'TIC {} had error {}\\n'.format(tic,e)\n# \t\terr_stmts.append(stmt)\n\t\tprint('TIC: {} had an exception and ended'.format(tic))\n\t\twith open(\"data/FOURTHRUN/exception_handling.txt\", \"a\") as 
text_file:\n\t\t\ttext_file.write(\"\\n\")\n\t\t\ttext_file.write(stmt)\n\t\t\t\n\n\n","repo_name":"deerow22/helenResearch","sub_path":"FOURTH_run.py","file_name":"FOURTH_run.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6510002148","text":"#!/usr/bin/env python\n\"\"\"Advent of Code 2020 - Day 11 - Solution by Julian Knorr (git@jknorr.eu)\"\"\"\nimport sys\nfrom typing import List, Tuple\n\n\nOCCUPIED_STR = \"#\"\nFLOOR_STR = \".\"\nEMPTY_STR = \"L\"\nOCCUPIED = 2\nEMPTY = 1\nFLOOR = 0\nState = List[List[int]]\n\n\ndef count_occupied_adjacent_seats(state: State, row: int, column: int) -> int:\n result = 0\n rows = len(state)\n columns = len(state[0])\n if row > 0:\n result += int(state[row - 1][column] == OCCUPIED)\n if column > 0:\n result += int(state[row - 1][column - 1] == OCCUPIED)\n if column < columns - 1:\n result += int(state[row - 1][column + 1] == OCCUPIED)\n if row < rows - 1:\n result += int(state[row + 1][column] == OCCUPIED)\n if column > 0:\n result += int(state[row + 1][column - 1] == OCCUPIED)\n if column < columns - 1:\n result += int(state[row + 1][column + 1] == OCCUPIED)\n if column > 0:\n result += int(state[row][column - 1] == OCCUPIED)\n if column < columns - 1:\n result += int(state[row][column + 1] == OCCUPIED)\n return result\n\n\ndef next_state(state: State) -> Tuple[State, bool]:\n rows = len(state)\n columns = len(state[0])\n new_state = []\n for r in range(rows):\n new_state.append([FLOOR] * columns)\n modified = False\n for r in range(rows):\n for c in range(columns):\n if state[r][c] != FLOOR:\n occupied_adjacent_seats = count_occupied_adjacent_seats(state, r, c)\n if state[r][c] == EMPTY:\n if occupied_adjacent_seats == 0:\n new_state[r][c] = OCCUPIED\n modified = True\n else:\n new_state[r][c] = EMPTY\n elif state[r][c] == OCCUPIED:\n if occupied_adjacent_seats >= 4:\n new_state[r][c] = EMPTY\n modified = True\n else:\n new_state[r][c] = OCCUPIED\n return new_state, modified\n\n\ndef occupied_seat_visible(state: State, row: int, column: int, direction_row: int, direction_column: int) -> bool:\n rows = len(state)\n columns = len(state[0])\n while True:\n row += direction_row\n column += direction_column\n if not (0 <= row < rows) or not (0 <= column < columns):\n return False\n if state[row][column] == OCCUPIED:\n return True\n if state[row][column] == EMPTY:\n return False\n\n\ndef count_occupied_visible_seats(state: State, row: int, column: int) -> int:\n result = 0\n for row_direction in range(-1, 2):\n for column_direction in range(-1, 2):\n if row_direction != 0 or column_direction != 0:\n result += int(occupied_seat_visible(state, row, column, row_direction, column_direction))\n return result\n\n\ndef next_state2(state: State) -> Tuple[State, bool]:\n rows = len(state)\n columns = len(state[0])\n new_state = []\n for r in range(rows):\n new_state.append([FLOOR] * columns)\n modified = False\n for r in range(rows):\n for c in range(columns):\n if state[r][c] != FLOOR:\n occupied_seats = count_occupied_visible_seats(state, r, c)\n if state[r][c] == EMPTY and occupied_seats == 0:\n new_state[r][c] = OCCUPIED\n modified = True\n elif state[r][c] == OCCUPIED and occupied_seats >= 5:\n new_state[r][c] = EMPTY\n modified = True\n else:\n new_state[r][c] = state[r][c]\n return new_state, modified\n\n\ndef count_occupied_seats(state: State) -> int:\n result = 0\n for row in state:\n for seat in row:\n if seat == OCCUPIED:\n result += 1\n return 
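# --- Illustrative sketch: the eight explicit bounds checks in
# count_occupied_adjacent_seats above can be collapsed into one loop over
# row/column deltas; OCCUPIED mirrors the constant defined in the solution.
OCCUPIED = 2

def count_occupied_adjacent(state, row, column):
    rows, columns = len(state), len(state[0])
    total = 0
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            if dr == dc == 0:
                continue  # skip the seat itself
            r, c = row + dr, column + dc
            if 0 <= r < rows and 0 <= c < columns and state[r][c] == OCCUPIED:
                total += 1
    return total

print(count_occupied_adjacent([[2, 0], [1, 2]], 0, 0))  # 1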
result\n\n\ndef task1(puzzle: State) -> int:\n    new_state = puzzle\n    modified = True\n    while modified:\n        new_state, modified = next_state(new_state)\n    return count_occupied_seats(new_state)\n\n\ndef task2(puzzle: State) -> int:\n    new_state = puzzle\n    modified = True\n    while modified:\n        new_state, modified = next_state2(new_state)\n    return count_occupied_seats(new_state)\n\n\ndef read_puzzle_file(filename: str) -> State:\n    file = open(filename, 'r')\n    state = []\n    columns = 0\n    for line in file.readlines():\n        row = []\n        for s in line.strip():\n            if s == FLOOR_STR:\n                row.append(FLOOR)\n            elif s == EMPTY_STR:\n                row.append(EMPTY)\n            elif s == OCCUPIED_STR:\n                row.append(OCCUPIED)\n            else:\n                raise ValueError(\"Invalid seat label.\")\n        if columns == 0 or columns == len(row):\n            state.append(row)\n        else:\n            raise ValueError(\"Every row needs to have the same length.\")\n        if columns == 0:\n            columns = len(row)\n    return state\n\n\nif __name__ == '__main__':\n    if len(sys.argv) == 2:\n        input_file = sys.argv[1]\n        puzzle_input = read_puzzle_file(input_file)\n        task1_solution = task1(puzzle_input)\n        print(\"Task 1: {:d}\".format(task1_solution))\n        task2_solution = task2(puzzle_input)\n        print(\"Task 2: {:d}\".format(task2_solution))\n    else:\n        print(\"Usage: {:s} puzzle file\".format(sys.argv[0]))\n","repo_name":"jc97/Advent-of-Code-2020","sub_path":"Day_11/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39799136904","text":"# def factorial(n):\n#     if n <= 1:\n#         DP[n] = 1\n#         return DP[n]\n#\n#     if DP[n]:\n#         return DP[n]\n#\n#     DP[n] = n * factorial(n-1)\n#     return DP[n]\n#\n# if __name__ == '__main__':\n#     N, K = map(int, input().split())\n#     DP = [0] * (N+1)\n#     print((factorial(N)//(factorial(K)*factorial(N-K)) % 1000000007))\n\ndef multi(start, end, next):\n    result = 1\n    for i in range(start, end, next):\n        result *= i\n    return result\n\nif __name__ == '__main__':\n    N, K = map(int, input().split())\n    print(multi(N, N-K, -1) // multi(K, 0, -1) % 1000000007)","repo_name":"cheol-95/Algorithm","sub_path":"Python/148. 이항 계수/Binomial coefficient.py","file_name":"Binomial coefficient.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6924194898","text":"from typing import List, Any\nimport make_to_batch.look_up_table as look_up_table\nimport make_to_batch.parser as parser\nimport re\nimport logging\n\n\nclass Makefile:\n    \"\"\"The representation of a Makefile.\n\n    It is composed of rules and variables. A Makefile starts with no rules and\n    no variables.\n\n    Attributes\n    ----------\n    __rules : Dict[str, Dict[str, List[str]]]\n        The rules of the Makefile.\n    __variables : Dict[str, Any]\n        The variables of the Makefile.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Create an empty Makefile\n        \"\"\"\n        self.__rules = {}\n        self.__variables = {}\n\n    def parse_file(self, file_content: str) -> None:\n        \"\"\"Parse an existing Makefile.\n\n        Parameters\n        ----------\n        file_content : str\n            The content of an existing Makefile.\n        \"\"\"\n        file_content = re.sub(r'''(?
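# --- Note on the Binomial coefficient record above: multi(N, N-K, -1) is the
# falling factorial N!/(N-K)!, and the integer division by K! is exact, so
# taking "% 1000000007" afterwards is valid. Since the modulus is prime, a
# Fermat-inverse formulation gives the same residue:
import math

MOD = 1_000_000_007
n, k = 10, 3
direct = math.comb(n, k) % MOD
fermat = (math.factorial(n)
          * pow(math.factorial(k), MOD - 2, MOD)
          * pow(math.factorial(n - k), MOD - 2, MOD)) % MOD
print(direct, fermat)  # 120 120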
List[str]:\n \"\"\"\n Get a rule's prerequisites from a string.\n\n Parameters\n ----------\n string : str\n A string containing the prerequisites.\n\n Returns\n -------\n List[str]\n A list of prerequisites.\n \"\"\"\n string = string.strip()\n string = re.sub(r'''^(?:\\|\\s*)''', '', string)\n prerequisites = string.split(\" \")\n return prerequisites if prerequisites != [\"\"] else []\n\n @staticmethod\n def __recipe_from_string(string: str) -> List[str]:\n \"\"\"Get a rule's recipe from a string.\n\n Parameters\n ----------\n string : str\n A string containing the rule's recipe.\n\n Returns\n -------\n List[str]\n A list of commands forming the recipe.\n \"\"\"\n recipe = string.strip().split('\\n')\n return recipe if recipe != [\"\"] else []\n\n def add_rule(self, target: str, prerequisites: List[str], recipe: List[str]) -> None:\n \"\"\"Add a rule to the Makefile.\n\n Parameters\n ----------\n target : str\n The new target's name.\n prerequisites : List[str]\n The new target's prerequisites.\n recipe : List[str]\n The new target's recipe.\n \"\"\"\n self.__rules[target] = {\n 'prerequisites': prerequisites,\n 'recipe': recipe\n }\n\n def remove_rule(self, target: str) -> None:\n \"\"\"Remove a rule from the Makefile.\n\n If the rule is not in the Makefile, do nothing.\n\n Parameters\n ----------\n target : str\n The target to be removed.\n \"\"\"\n if target in self.__rules:\n del self.__rules[target]\n\n def remove_variable(self, variable: str) -> None:\n \"\"\"Remove a variable from the Makefile.\n\n If the variable is not in the Makefile, do nothing.\n\n Parameters\n ----------\n variable : str\n The variable to be removed.\n \"\"\"\n if variable in self.__rules:\n del self.__variables[variable]\n\n def add_variable(self, name: str, value: Any) -> None:\n \"\"\"Add a variable to the Makefile.\n\n Parameters\n ----------\n name : str\n The new variable's name.\n value : Any\n The new variable's value.\n \"\"\"\n self.__variables[name] = value\n\n @staticmethod\n def __convert_command_to_batch(old_command: str) -> str:\n \"\"\"Convert a Makefile command to a batch command.\n\n Parameters\n ----------\n old_command : str\n The command to be converted.\n\n Returns\n -------\n str\n The equivalent command in batch. 
If no equivalent command is found, return the starting command.\n \"\"\"\n commands_list = old_command.strip().split(\"&&\")\n batch_commands = []\n\n number_of_dir_changed = 0\n for command in commands_list:\n command = command.strip()\n parsed_command = parser.Parser(command)\n\n logging.info(\"FOUND COMMAND: {}\\n\".format(parsed_command.program) +\n \"\\tOPTIONS: {}\\n\".format(parsed_command.options) +\n \"\\tPARAMETERS: {}\".format(parsed_command.parameters))\n\n match = re.match(r\"^cd (.*?)$\", command)\n if match:\n number_of_dir_changed += 1\n batch_commands.append(\"PUSHD \" + str(match.group(1)))\n continue\n if parsed_command.program in look_up_table.linux_to_dos:\n current_command = look_up_table.linux_to_dos[parsed_command.program]\n options = [current_command['options'][opt] if opt in current_command['options'] else opt for opt in parsed_command.options]\n batch_commands.append(current_command['command'] + \" \" + ' '.join(parsed_command.parameters) + ' ' + ' '.join(options))\n else:\n batch_commands.append(command)\n\n for _ in range(number_of_dir_changed):\n batch_commands.append(\"POPD\")\n\n batch_commands_str = re.sub(r\"\\$[({](.*?)[)}]\", r\"%\\1%\", \" && \".join(batch_commands))\n batch_commands_str = re.sub(r\"%MAKE%\", r\"CALL make.bat\", batch_commands_str)\n return batch_commands_str\n\n def to_batch(self) -> str:\n \"\"\"Convert the Makefile to a Batch file.\n\n Returns\n -------\n str\n The batch file's content.\n \"\"\"\n batch_content = \"@echo off\\n\\n\"\n for var in self.__variables:\n batch_content += \"SET {var}={val}\\n\".format(var=var, val=self.__variables[var])\n\n batch_content += \"\\n\"\n\n for rule in self.__rules:\n batch_content += '''IF /I \"%1\"==\"{rule}\" GOTO {rule}\\n'''.format(rule=rule)\n if \"all\" in self.__rules:\n batch_content += '''IF /I \"%1\"==\"\" GOTO all\\n'''\n batch_content += '''GOTO error\\n'''\n\n batch_content += \"\\n\"\n\n for rule in self.__rules:\n batch_content += \":{}\\n\".format(rule)\n for prerequisite in self.__rules[rule][\"prerequisites\"]:\n batch_content += \"\\tCALL make.bat {}\\n\".format(prerequisite)\n for command in self.__rules[rule][\"recipe\"]:\n batch_content += \"\\t\" + Makefile.__convert_command_to_batch(command) + \"\\n\"\n batch_content += \"\\tGOTO :EOF\\n\\n\"\n\n batch_content += ''':error\n IF \"%1\"==\"\" (\n ECHO make: *** No targets specified and no makefile found. Stop.\n ) ELSE (\n ECHO make: *** No rule to make target '%1%'. 
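# --- Tiny demo of the substitution used in __convert_command_to_batch above:
# Makefile-style $(VAR) and ${VAR} references become batch %VAR% references.
import re

cmd = "echo $(CC) ${CFLAGS}"
print(re.sub(r"\$[({](.*?)[)}]", r"%\1%", cmd))  # echo %CC% %CFLAGS%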
Stop.\n    )\n    GOTO :EOF'''\n\n        batch_content += \"\\n\"\n\n        return batch_content\n","repo_name":"espositoandrea/Make-to-Batch","sub_path":"make_to_batch/makefile.py","file_name":"makefile.py","file_ext":"py","file_size_in_byte":7657,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"32217713533","text":"#pylint: disable=missing-final-newline,missing-module-docstring,import-error,c0325,c0303,c0301,c0115,c0116,w1401,c0413,w0622,r1705,r1716,w0622,w0611,c0411,e1120,r1710,r0903\nimport requests\nfrom bs4 import BeautifulSoup\n\nclass Scraper:\n    def __init__(self,url:str):\n        self.url = url\n\n    def scrape_site(self):\n        page = requests.get(self.url)\n        if page.status_code == 200:\n            soup = BeautifulSoup(page.content, 'html.parser')\n            for script_or_style in soup([\"script\", \"style\"]):\n                script_or_style.extract()\n            text = soup.get_text()\n            # return {' '.join(text.split())}\n            return text\n\nif __name__ == \"__main__\":\n    import sys\n    # the URL comes from the command line; the original guard sat inside the\n    # class body and called scrape_site() without an instance\n    print(Scraper(sys.argv[1]).scrape_site())\n","repo_name":"magnusgilje/ce02_ice05_kubernetes","sub_path":"K8s-MLE/k8s-aws-demo-iceberg-lion/circle/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16671218820","text":"import json\nimport os\n\nimport boto3\nfrom datetime import datetime\n\n\ndef lambda_handler(event, context):\n    print(event)\n    message = json.loads(event['Records'][0]['Sns']['Message'])\n    print(\"From SNS: \" + str(message))\n\n    dynamodb_client = boto3.resource('dynamodb')\n    table = dynamodb_client.Table(os.environ[\"TableName\"])\n    print(table)\n    bucket_name = os.environ[\"BucketName\"]\n    print(bucket_name)\n    file_path = message['file_path']\n    item_id = bucket_name + \"/\" + file_path\n\n    new_name = message['name']\n    new_tag = message['tag']\n    new_description = message['description']\n\n    lastDotIndex = file_path.rfind('.')\n    file_type = file_path[lastDotIndex:]\n    lastSlashIndex = file_path.rfind('/')\n\n    new_id = bucket_name + '/' + file_path[:lastSlashIndex] + '/' + new_name + file_type\n    print(\"reached try block \", new_id, item_id)\n    try:\n        response = table.get_item(Key={'id': item_id})\n        print(response)\n        item = response.get('Item')\n        if item:\n            item['id'] = new_id\n            item['name'] = new_name\n            item['tag'] = new_tag\n            item['description'] = new_description\n            item['editTime'] = int(datetime.now().timestamp())\n            '''print('new item ',item)\n            table.update_item(\n                Key={'id': item_id},\n                UpdateExpression='SET id = :new_id, #nm = :new_name, tag = :new_tag, description = :new_description, editTime = :new_edit_time',\n                ExpressionAttributeValues={\n                    ':new_id': item['id'],\n                    ':new_name': item['name'],\n                    ':new_tag': item['tag'],\n                    ':new_description': item['description'],\n                    ':new_edit_time': item['editTime']\n                },\n                ExpressionAttributeNames={\n                    '#nm': 'name'\n                }\n            )'''\n            print(item_id)\n            table.delete_item(Key={'id': item_id})\n            print(item)\n\n            response = table.put_item(\n                Item=item\n            )\n            print('finished edit')\n            return {\n                'statusCode': 200,\n                'body': f'File \"{file_path}\" is modified successfully.'\n            }\n        else:\n            return {\n                'statusCode': 404,\n                'body': f'File not found: {file_path}'\n            }\n    except Exception as e:\n        return {\n            'statusCode': 500,\n            'body': f'Error modifying file: {str(e)}'\n
}","repo_name":"jovannajdovski/Cloud_photo_album","sub_path":"photo-album/file/edit/edit_metadata_ddb.py","file_name":"edit_metadata_ddb.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31862856711","text":"class sieunhan:\n stt = 1\n so_thu_tu = 1\n suc_manh = 50\n def __init__(self, para_ten, para_vukhi, para_mau):\n self .ten = \"sieu nhan \" + para_ten\n self .vukhi = para_vukhi\n self .mau = para_mau\n self.stt = sieunhan.so_thu_tu\n sieunhan.so_thu_tu += 1\n def xin_chao(self):\n print( \"xin chao ta la \" + self.ten)\nsieu_nhan_A = sieunhan(\"gao\",\"jozz\",\"den\")\nsieu_nhan_B = sieunhan('xuan','nam','vang')\nprint(sieunhan.suc_manh)\nsieunhan.suc_manh = 40\nsieu_nhan_A.suc_manh = 80\nprint(sieu_nhan_A.suc_manh)\nprint(sieu_nhan_A.stt)\nprint(sieu_nhan_B.stt)\nsieu_nhan_A.xin_chao()","repo_name":"chauhoagnhat/Hoc-Lap-Trinh","sub_path":"opp_1.py","file_name":"opp_1.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1305519263","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nInvert Quotes\r\n\r\nPlugin for Sublime Text to convert single quotes to double quotes and vice versa\r\n\r\nCopyright (c) 2012 Frédéric Massart - FMCorz.net\r\n\r\nLicensed under The MIT License\r\nRedistributions of files must retain the above copyright notice.\r\n\r\nhttp://github.com/FMCorz/InvertQuotes\r\n\"\"\"\r\n\r\nimport sublime, sublime_plugin\r\n\r\nclass InvertQuotesCommand(sublime_plugin.TextCommand):\r\n\r\n\tdef run(self, edit, invert_all = False):\r\n\t\tview = self.view\r\n\t\tregions = view.sel()\r\n\r\n\t\tfor region in regions:\r\n\r\n\t\t\tif region.empty():\r\n\t\t\t\twhole_region = self.get_string_region(region)\r\n\t\t\t\tif not whole_region: continue\r\n\t\t\telse:\r\n\t\t\t\twhole_region = region\r\n\t\t\t\t\r\n\t\t\tif not invert_all:\r\n\t\t\t\tbegin = sublime.Region(whole_region.begin(), whole_region.begin() + 1)\r\n\t\t\t\tend = sublime.Region(whole_region.end(), whole_region.end() - 1)\r\n\r\n\t\t\t\tif view.substr(begin) in ('\"', \"'\") and view.substr(end) in ('\"', \"'\"):\r\n\t\t\t\t\tview.replace(edit, begin, \"'\" if view.substr(begin) == '\"' else '\"')\r\n\t\t\t\t\tview.replace(edit, end, \"'\" if view.substr(end) == '\"' else '\"')\r\n\r\n\t\t\telse:\r\n\t\t\t\tfrom_position = whole_region.begin()\r\n\r\n\t\t\t\twhile True:\r\n\t\t\t\t\tsubregion = view.find('\"|\\'', from_position)\r\n\r\n\t\t\t\t\tif not subregion or not whole_region.contains(subregion):\r\n\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\t\tview.replace(edit, subregion, \"'\" if view.substr(subregion) == '\"' else '\"')\r\n\t\t\t\t\tfrom_position = subregion.end()\r\n\r\n\tdef get_string_region(self, region):\r\n\t\tregion_begin = region.begin()\r\n\t\tregion_end = region.end()\r\n\r\n\t\tif self.view.score_selector(region_begin, 'string'):\r\n\t\t\twhile self.view.score_selector(region_begin - 1, 'string'):\r\n\t\t\t\tregion_begin -= 1\r\n\t\t\t\tif region_begin <= 0:\r\n\t\t\t\t\tbreak\r\n\t\t\twhile self.view.score_selector(region_end, 'string'):\r\n\t\t\t\tregion_end += 1\r\n\t\t\t\tif region_end >= self.view.size():\r\n\t\t\t\t\tbreak\r\n\t\t\tregion = sublime.Region(region_begin, region_end)\r\n\r\n\t\telse:\r\n\t\t\tregion = None\r\n\r\n\t\treturn 
region","repo_name":"FMCorz/InvertQuotes","sub_path":"invert_quotes.py","file_name":"invert_quotes.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"22504212129","text":"from typing import Optional, Tuple, Union\nimport torch\nfrom torch.nn import Module\nfrom transformers import EncoderDecoderModel, AutoTokenizer\nfrom hatespace.models.outputs import ArchetypalTransformerModelOutput\nfrom transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions\nfrom hatespace.models.utils import shift_tokens_right\n\nfrom transformers import logging\n\nlogging.set_verbosity_error()\n\n# TODO Make this easy to switch between inner embedder and no inner_embedder (or other inner_embedders for that matter)\n# TODO This guy needs a better name\nclass TransformerArchetypal(EncoderDecoderModel):\n def __init__(\n self,\n encoder_decoder: EncoderDecoderModel,\n inner_embedder: Module,\n tokenizer: AutoTokenizer,\n ) -> None:\n super().__init__(\n config=encoder_decoder.config,\n encoder=encoder_decoder.encoder,\n decoder=encoder_decoder.decoder,\n )\n del encoder_decoder\n\n self.train() # WHY?!?!\n self.decoder.gradient_checkpointing_enable()\n\n self.inner_embedder = inner_embedder\n self.vocab_size = self.decoder.config.vocab_size\n\n self.config.decoder_start_token_id = tokenizer.cls_token_id\n self.config.pad_token_id = tokenizer.pad_token_id\n self.config.vocab_size = self.config.decoder.vocab_size\n self.config.bos_token_id = tokenizer.cls_token_id\n\n @classmethod\n def from_pretrained(\n cls,\n model_name_or_path: Union[str, Tuple[str, str]],\n inner_embedder: Module = None,\n tokenizer: AutoTokenizer = None,\n ) -> \"TransformerArchetypal\":\n if isinstance(model_name_or_path, (tuple, list)):\n encoder_type, decoder_type = model_name_or_path\n else:\n encoder_type = model_name_or_path\n decoder_type = model_name_or_path\n encoder_decoder = EncoderDecoderModel.from_encoder_decoder_pretrained(\n encoder_type, decoder_type\n )\n if inner_embedder is None:\n inner_embedder = ArchetypalHead(512, 769, 12)\n if tokenizer is None:\n tokenizer = AutoTokenizer.from_pretrained(encoder_type)\n return cls(encoder_decoder, inner_embedder, tokenizer)\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,\n past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n **kwargs,\n ):\n return_dict = (\n return_dict\n if return_dict is not None\n else self.encoder.config.use_return_dict\n )\n\n kwargs_encoder = {\n argument: value\n for argument, value in kwargs.items()\n if not argument.startswith(\"decoder_\")\n }\n\n kwargs_decoder = {\n argument[len(\"decoder_\") :]: value\n for argument, value in kwargs.items()\n if argument.startswith(\"decoder_\")\n }\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n 
output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs_encoder,\n )\n\n predicted_encoder_hidden_states, embeddings = self.inner_embedder(\n encoder_outputs.last_hidden_state\n )\n\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n input_ids, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n # When training the values of these are as follows:\n # decoder_input_ids: with shape (batch_size, 512)\n # decoder_attention_mask: with shape (batch_size, 512, 768)\n # predicted_encoder_hidden_states: with shape (batch_size, 512)\n # attention_mask: with shape (batch_size, 512)\n # decoder_inputs_embeds: \n # output_attentions: \n # output_hidden_states: \n # use_cache: \n # past_key_values: True\n\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=predicted_encoder_hidden_states,\n encoder_attention_mask=attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n use_cache=use_cache,\n past_key_values=past_key_values,\n return_dict=return_dict,\n **kwargs_decoder,\n )\n\n return ArchetypalTransformerModelOutput(\n logits=decoder_outputs.logits,\n embeddings=embeddings,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def generate_from_sequence(\n self, inputs: torch.Tensor, *args, **kwargs\n ) -> torch.LongTensor:\n if len(inputs.shape) <= 1:\n inputs = torch.unsqueeze(inputs, dim=0)\n return self.generate(input_ids=inputs, *args, **kwargs)\n\n def generate_from_embeddings(\n self, embeddings: torch.Tensor, *args, **kwargs\n ) -> torch.LongTensor:\n intermediate_encodings = self.inner_embedder.decoder(embeddings)\n intermediate_encodings = torch.reshape(\n intermediate_encodings, (embeddings.shape[0], 512, 768)\n )\n intermediate_encodings = BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=intermediate_encodings\n )\n return self.generate(\n inputs=None, encoder_outputs=intermediate_encodings, *args, **kwargs\n )\n\n\n# TODO Add batch normalization\n# TODO remember to turn off bias on layers just before batch norm\n\n\nclass ArchetypalHead(Module):\n def __init__(\n self, max_token_length: int, token_dimensions: int, num_archetypes: int\n ) -> None:\n super().__init__()\n self.num_archetypes = num_archetypes\n self.max_token_length = max_token_length\n self.token_dimensions = token_dimensions\n self.input_size = max_token_length * token_dimensions\n self.encoder = torch.nn.Sequential(\n torch.nn.Linear(self.input_size, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, self.num_archetypes),\n torch.nn.Softmax(dim=1),\n )\n self.decoder = torch.nn.Sequential(\n torch.nn.Linear(self.num_archetypes, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, 512),\n torch.nn.ReLU(),\n torch.nn.Linear(512, self.input_size),\n torch.nn.ReLU(),\n )\n\n def forward(self, x):\n input_shape = x.shape\n x = torch.flatten(x, start_dim=1)\n x = torch.nn.functional.pad(x, (0, 393216 - x.shape[1]))\n embedding = self.encoder(x)\n output = torch.reshape(\n 
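# --- Shape walk-through for ArchetypalHead.forward above: the hard-coded pad
# width 393216 is exactly 512 * 768, the flattened size of a full-length
# encoder output, so shorter sequences are right-padded up to it.
import torch

batch, seq_len, dims = 2, 300, 768
x = torch.randn(batch, seq_len, dims)
flat = torch.flatten(x, start_dim=1)  # (2, 230400)
flat = torch.nn.functional.pad(flat, (0, 512 * 768 - flat.shape[1]))
print(flat.shape)  # torch.Size([2, 393216])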
self.decoder(embedding),\n (input_shape[0], self.max_token_length, self.token_dimensions),\n )[:, : input_shape[1], :]\n return (output, embedding)\n","repo_name":"HesitantlyHuman/hatespace","sub_path":"hatespace/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"15937739211","text":"\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\nclass ApiDefMapTest(test_util.TensorFlowTestCase):\n def testApiDefMapOpNames(self):\n api_def_map = c_api_util.ApiDefMap()\n self.assertIn(\"Add\", api_def_map.op_names())\n def testApiDefMapGet(self):\n api_def_map = c_api_util.ApiDefMap()\n op_def = api_def_map.get_op_def(\"Add\")\n self.assertEqual(op_def.name, \"Add\")\n api_def = api_def_map.get_api_def(\"Add\")\n self.assertEqual(api_def.graph_op_name, \"Add\")\n def testApiDefMapPutThenGet(self):\n api_def_map = c_api_util.ApiDefMap()\n api_def_text = \"\"\"\nop {\n graph_op_name: \"Add\"\n summary: \"Returns x + y element-wise.\"\n description: < HIST_COUNT:\n self.sonar_hist[i].pop(0)\n\n # Find if it's outlier\n outlier = False\n outliers = 0\n for j, dist in enumerate(self.sonar_hist[i]):\n range_thresh = NOISE_LEVEL + MAX_DELTA_PER_READING * j\n if abs(reading - dist) > range_thresh:\n outliers += 1\n if outliers > MAX_OUTLIERS:\n outlier = True\n break\n\n return outlier\n\n # sonar_max_ranges = [0.2, 0.5, 0.55, 0.7, 0.5, 0.5, 0.7, 0.55, 0.5, 0.2] # NOTE: tuning\n sonar_max_ranges = [0.07] + [0.35] * 8 + [0.07] # NOTE: tuning filter\n\n sonars_fwd = [0, 1, 8, 9]\n sonars_angle = [2, 7]\n sonars_right = [3,]\n sonars_left = [6,]\n sonars_bwd = [4, 5]\n\n vel_limit = 0.15\n to_ang = 3.85\n limit_adder = 0.05\n \n\n text = \"\"\n for i in range(len(range_data.data)):\n dist = range_data.data[i] / 1000.0\n if dist <= 0 or dist > sonar_max_ranges[i] or is_outlier(dist, i) is True:\n text += \" \"\n else:\n text += \"{:5.2f} \".format(dist)\n # print(\"SONAR: {}\".format(text))\n\n for i in range(len(range_data.data)):\n dist = range_data.data[i] / 1000.0\n\n\n # Sonar not working (because of it robot slows down\n if i == 8:\n continue\n\n if dist <= 0 or dist > sonar_max_ranges[i] or is_outlier(dist, i) is True:\n # If outlier increse speed\n if i in sonars_angle and i not in sonars_fwd:\n self.safe_vel_lin_fwd += limit_adder / len(sonars_angle)\n\n if i in sonars_bwd:\n self.safe_vel_lin_bwd += limit_adder / len(sonars_bwd)\n\n if i in sonars_right:\n self.safe_vel_ang_ccw += limit_adder * to_ang / len(sonars_right)\n\n if i in sonars_left:\n self.safe_vel_ang_cw += limit_adder * to_ang / len(sonars_left)\n\n continue # Outside trusted range or outlier ignore it\n \n # Set safe velocities\n if i in sonars_fwd:\n # Using min() to not overrite bump sensors limit\n self.safe_vel_lin_fwd = min(vel_limit, self.safe_vel_lin_fwd)\n elif i in sonars_angle:\n if dist > sonar_max_ranges[i] / 2.0:\n self.safe_vel_lin_fwd = min(vel_limit * 2, self.safe_vel_lin_fwd)\n else:\n self.safe_vel_lin_fwd = min(vel_limit, self.safe_vel_lin_fwd)\n \n if i in sonars_bwd:\n self.safe_vel_lin_bwd = min(vel_limit, self.safe_vel_lin_bwd)\n\n if i in sonars_right:\n self.safe_vel_ang_ccw = min(vel_limit * to_ang, self.safe_vel_ang_ccw)\n\n if i in sonars_left:\n self.safe_vel_ang_cw = min(vel_limit * to_ang, self.safe_vel_ang_cw)\n\n\n def bump_callback(self, bump_data):\n \"\"\"\n 
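# --- Hedged, standalone sketch of the sonar outlier test above (the top of
# that record is truncated, so the constants here are reconstructed
# assumptions, not the robot's tuned values): a reading is rejected when it
# disagrees with too many stored samples by more than a noise floor that
# grows with each sample's age.
NOISE_LEVEL = 0.05  # metres of tolerated jitter (illustrative)
MAX_DELTA_PER_READING = 0.02  # allowed movement between frames (illustrative)
MAX_OUTLIERS = 2

def is_outlier(reading, history):
    outliers = 0
    for age, dist in enumerate(history):
        if abs(reading - dist) > NOISE_LEVEL + MAX_DELTA_PER_READING * age:
            outliers += 1
            if outliers > MAX_OUTLIERS:
                return True
    return False

print(is_outlier(2.0, [0.5, 0.5, 0.5, 0.5]))   # True: jump far beyond the floor
print(is_outlier(0.52, [0.5, 0.5, 0.5, 0.5]))  # False: within the noise floor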
Refreshes speed limiting depending on bump reading\n \"\"\"\n\n # Converting int16 to string of len 15\n bin_data = '{data:0{width}b}'.format(data=bump_data.bump_bits, width=self.BUMP_SENSORS_COUNT)[::-1]\n\n bump_fwd = [0, 1, 2, 12, 13, 14]\n bump_bwd = [6, 7, 8]\n bump_right = [4, 5]\n bump_left = [9, 10]\n\n vel_limit = 0.01 # Should stop but let's give some movement\n to_ang = 3.85\n limit_adder = 0.05\n\n # text = \"\"\n # for i in range(self.BUMP_SENSORS_COUNT):\n # if bin_data[i] == '1' and self.bin_data_old[i] == '1':\n # text += \"1\"\n # else:\n # text += \"0\"\n # print(\"BUMP: {}\".format(bin_data)) \n\n for i in range(self.BUMP_SENSORS_COUNT):\n # Simple filter. Consider trigger if triggered for 2 frame in a row\n if bin_data[i] == '1' and self.bin_data_old[i] == '1': # If bumped\n # Set safe velocities\n if i in bump_fwd:\n self.safe_vel_lin_fwd = vel_limit\n else:\n self.safe_vel_lin_fwd += limit_adder / len(bump_fwd)\n\n if i in bump_bwd:\n self.safe_vel_lin_bwd = vel_limit\n else:\n self.safe_vel_lin_bwd += limit_adder / len(bump_bwd)\n\n if i in bump_right:\n self.safe_vel_ang_ccw = vel_limit * to_ang\n else:\n self.safe_vel_ang_ccw += limit_adder * to_ang / len(bump_right)\n\n if i in bump_left:\n self.safe_vel_ang_cw = vel_limit * to_ang\n else:\n self.safe_vel_ang_cw += limit_adder * to_ang / len(bump_left)\n\n # Remember previous bin_data\n self.bin_data_old = bin_data\n\n\n def cmd_vel_callback(self, cmd_vel, fake=False):\n '''\n Accepts cmd_vel either from ROS topic or from pasing cmd_vel to this funtion\n If auton_mode is true publishes limited (velocity and accel) cmd_vel setpoint for a pid controller \n Save self.cmd_vel to a variable and updates its freshness\n Limits linear and angular velocities to self.safe_vel_lin and self.safe_vel_ang\n '''\n def limit_cmd_vel(vel, vel_prev):\n '''\n Limits cmd_vel linear and angular speed and acceleration depending on mode\n '''\n TR = self.TELEOP_RATE\n\n if fake is False: # Auton mode\n max_fwd = min(self.MAX_AUTON_VEL_LIN, self.safe_vel_lin_fwd)\n max_bwd = -min(self.MAX_AUTON_VEL_LIN, self.safe_vel_lin_bwd)\n max_cw = -min(self.MAX_AUTON_VEL_ANG, self.safe_vel_ang_cw)\n max_ccw = min(self.MAX_AUTON_VEL_ANG, self.safe_vel_ang_ccw)\n\n vel.linear.x = limit_x(vel.linear.x, vel_prev.linear.x, x_min=max_bwd, x_max=max_fwd, dx_max=self.MAX_AUTON_ACCEL_LIN / TR)\n vel.angular.z = limit_x(vel.angular.z, vel_prev.angular.z, x_min=max_cw, x_max=max_ccw, dx_max=self.MAX_AUTON_ACCEL_ANG / TR)\n else: # RC mode\n max_fwd = min(self.MAX_RC_VEL_LIN, self.safe_vel_lin_fwd)\n max_bwd = -min(self.MAX_RC_VEL_LIN, self.safe_vel_lin_bwd)\n max_cw = -min(self.MAX_RC_VEL_ANG, self.safe_vel_ang_cw)\n max_ccw = min(self.MAX_RC_VEL_ANG, self.safe_vel_ang_ccw)\n\n vel.linear.x = limit_x(vel.linear.x, vel_prev.linear.x, x_min=max_bwd, x_max=max_fwd, dx_max=self.MAX_RC_ACCEL_LIN / TR)\n vel.angular.z = limit_x(vel.angular.z, vel_prev.angular.z, x_min=max_cw, x_max=max_ccw, dx_max=self.MAX_RC_ACCEL_ANG / TR)\n\n return vel\n\n self.cmd_vel = limit_cmd_vel(cmd_vel, self.cmd_vel)\n if fake is False:\n self.fresh_cmd_vel.updated()\n\n # Velocity setpoint\n self.float64.data = self.cmd_vel.linear.x\n self.setpoint_vel_pub.publish(self.float64)\n\n # Yaw speed setpoint\n self.float64.data = self.cmd_vel.angular.z\n self.setpoint_yaw_pub.publish(self.float64)\n\n def odom_callback(self, odom):\n '''\n Publishes state data for PID controller\n Updates that fresh odometry received\n '''\n # State for velocity PID\n self.float64.data = 
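# --- Alternative sketch for the bitmask decode in bump_callback above:
# instead of formatting the int as a binary string and reversing it, each
# sensor bit can be read with a shift-and-mask.
BUMP_SENSORS_COUNT = 15

def pressed_sensors(bump_bits):
    # bit i set means bump sensor i is triggered
    return [i for i in range(BUMP_SENSORS_COUNT) if (bump_bits >> i) & 1]

print(pressed_sensors(0b101))  # [0, 2]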
odom.twist.twist.linear.x\n self.state_vel_pub.publish(self.float64)\n\n # State for yaw speed PID\n self.float64.data = odom.twist.twist.angular.z\n self.state_yaw_pub.publish(self.float64)\n\n # Fresh odometry received\n self.fresh_odom.updated()\n\n def pid_vel_callback(self, effort):\n '''\n Updates pid commanded pid_teleop.speed \n '''\n self.pid_teleop.speed = effort.data * 1000\n\n if math.isnan(self.pid_teleop.speed) is True:\n self.pid_teleop.speed = 0\n\n def pid_yaw_callback(self, effort):\n '''\n Updates pid commanded pid_teleop.speed \n '''\n self.pid_teleop.steer = effort.data * 1000\n\n if math.isnan(self.pid_teleop.steer) is True:\n self.pid_teleop.steer = 0\n\n def control_motors(self):\n '''\n Publishes to /teleop speed and steer command for hoverboard motors\n Mode RC: Reads data from /rc\n Mode Autonomous: Reads data from /cmd_vel\n '''\n # In Hz should be atleast 20. Should be as fast as RC input\n # At 50Hz or above sync is lost! (softSerial baud 9600)\n # If faster update rate is needed try higher baud rate for hoverboard serial\n rate = rospy.Rate(self.TELEOP_RATE) # Recommended 40Hz\n while not rospy.is_shutdown(): # ---------------------------Infinite loop\n # -------------------------------- SAFETY ---------------------------------------\n # Disable EKF filter when no odometry available and enable if it's fresh\n # Disable any control because PID requires odometry and if it fails it will go at full speed\n if self.fresh_odom.changed() is True: # On odom freshes change\n is_fresh = self.fresh_odom.is_fresh()\n req = ToggleFilterProcessingRequest(is_fresh)\n try:\n self.toggle_ekf_global_srv(req)\n self.toggle_ekf_local_srv(req)\n except Exception as e:\n rospy.logwarn(\"Can't toggle ekf node: {}\".format(e))\n\n self.reset_pids() # Maybe not needed but to be sure\n self.enable_pids(is_fresh)\n\n if self.fresh_odom.is_fresh() is False:\n self.audio_pub.publish(\"Critical drive disabled\")\n else:\n self.audio_pub.publish(\"Drive enabled\")\n\n if self.fresh_odom.is_fresh() is False:\n # Stop hoverboard don't allow any movement until odometry data is updated\n self.teleop.speed = 0\n self.teleop.steer = 0\n else: # Everything ok. 
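# --- limit_x is imported elsewhere in this package; judging from the call
# sites above it clamps a setpoint into a range and rate-limits the change
# from the previous value. A plausible implementation (an assumption, not the
# project's actual helper):
def limit_x(x, x_prev, x_min=None, x_max=None, x_max_abs=None, dx_max=None):
    if x_max_abs is not None:
        x_min, x_max = -x_max_abs, x_max_abs
    if dx_max is not None:  # acceleration limit per control tick
        x = max(x_prev - dx_max, min(x_prev + dx_max, x))
    if x_min is not None:
        x = max(x_min, x)
    if x_max is not None:
        x = min(x_max, x)
    return x

print(limit_x(10.0, 0.0, x_max_abs=5.0, dx_max=2.0))  # 2.0: slew-limited, then clamped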
Continue with normal control\n # ------------------------------- CONTROL STATES --------------------------------\n if self.allow_rc is True and self.armed is True: # ---------RC control\n # Check if rc_teleop is not stale\n if self.fresh_rc_teleop.is_fresh() is True:\n self.enable_pids(True)\n else: # rc_teleop stale\n # Zero rc_teleop\n self.rc_teleop.speed = 0\n self.rc_teleop.steer = 0\n self.enable_pids(False)\n\n # self.teleop = self.rc_teleop # Old pure RC control\n\n # --------------------------- PID RC CONTROL ---------------------------\n # Sub and Publish state vel to state_vel topic (in odom_callback())\n\n # Velocity setpoint in m/s\n rc_cmd_vel = Twist()\n rc_cmd_vel.linear.x = self.rc_teleop.speed / 1000.0 * self.MAX_RC_VEL_LIN\n rc_cmd_vel.angular.z = self.rc_teleop.steer / 1000.0 * self.MAX_RC_VEL_ANG\n self.cmd_vel_callback(rc_cmd_vel, fake=True)\n\n # These are updated in callback function\n self.teleop.speed = self.pid_teleop.speed\n self.teleop.steer = self.pid_teleop.steer\n elif self.auton_mode is True: # ----------- Autonomous control\n '''\n Brief: Executes /cmd_vel commands\n Reads /cmd_vel in callback\n Uses PID to find apropriate teleop commands\n Sets teleop commands\n '''\n self.enable_pids(True)\n\n # Check if /cmd_vel is stale and should not be used\n if self.fresh_cmd_vel.is_fresh() is True:\n '''\n Do only if auton_mode is True (set inside cmd_vel_callback function)\n Publish cmd_vel velocity as setpoint for pid controller (cmd_vel_callback())\n Sub and Publish state vel to state_vel topic (odom_callback())\n Sub to effort_vel topic (pid_vel_callback())\n Set effort to teleop\n '''\n self.teleop.speed = self.pid_teleop.speed\n self.teleop.steer = self.pid_teleop.steer\n else: # cmd_vel is stale\n self.reset_pids()\n self.teleop.speed = 0\n self.teleop.steer = 0\n # print('Auton mode: speed: {:2.2f} steer: {:2.2f}'.format(self.teleop.speed, self.teleop.steer))\n else: # ----------------------------------- Fully disarmed no control\n self.teleop.speed = 0\n self.teleop.steer = 0\n self.enable_pids(False)\n\n # Global limit teleop speed and acceleration (in units [-1000..1000])\n TR = self.TELEOP_RATE\n self.teleop.speed = limit_x(self.teleop.speed, self.teleop_prev.speed, x_max_abs=self.MAX_SPEED, dx_max=self.MAX_ACCEL_LIN / TR)\n self.teleop.steer = limit_x(self.teleop.steer, self.teleop_prev.steer, x_max_abs=self.MAX_STEER, dx_max=self.MAX_ACCEL_ANG / TR)\n self.teleop_prev = deepcopy(self.teleop)\n\n # Publish teleop command to hoverboard\n self.teleop_pub.publish(self.teleop)\n\n rate.sleep()\n\n\nif __name__ == '__main__':\n mot_ctrl = MotorsController()\n\n try:\n mot_ctrl.init_ros()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"Combinacijus/Samana-Autonomous-Robot","sub_path":"ROS/samana_ws/src/samana/src/motors_controller.py","file_name":"motors_controller.py","file_ext":"py","file_size_in_byte":21466,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"73773137652","text":"import csv, json, sys\n#if you are not using utf-8 files, remove the next line\n#check if you pass the input file and output file\nif sys.argv[1] is not None and sys.argv[2] is not None:\n\n fileInput = sys.argv[1]\n fileOutput = sys.argv[2]\n\n f = csv.writer(open(fileOutput, \"w+\"))\n f.writerow([\"id\", \"Name\",\"AccountNumber\", \"AcctType\",\"BillingStreet\",\"BillingCity\",\"BillingState\",\"BillingPostalCode\",\"Phone\",\"Fax\",\"Email\",\"Status\"])\n\n with open(fileInput) as fp:\n for line in fp:\n 
x = json.loads(line)\n f.writerow([x[\"id\"],\n x[\"Name\"][\"Name\"],\n x[\"accountnumber\"],\n x[\"AcctType\"],\n x[\"BillingStreet\"],\n x[\"BillingCity\"][\"city\"],\n x[\"BillingState\"],\n x[\"BillingPostalCode\"][\"zip\"],\n x[\"Phone\"],\n x[\"Fax\"],\n x[\"Email\"],\n x[\"Status\"]]\n )\n fp.close()\n","repo_name":"saagie/demo-customer360","sub_path":"datamodel/sampledata/tblAccount/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33118155890","text":"def titulo(tit='', subtitulo=''):\n print('\\033[7;37m-' * 150, '\\033[m')\n print('\\033[7;37m', tit.strip().center(149).upper(), '\\033[m')\n print('\\033[7;37m-' * 150, '\\033[m')\n if subtitulo != '':\n print(subtitulo.strip().center(150), '\\n')\n\n\ndef menu(*op):\n opcoes = []\n for i, p in enumerate(op):\n opcoes.append(p)\n print(f'{i+1} - {p}'.center(100))\n print()\n while True:\n try:\n resposta = int(input(f'{\"\":<10}Sua opção: '))\n if 0 < resposta <= len(opcoes):\n return resposta\n else:\n print('Digite uma opção válida.'.center(150))\n except KeyboardInterrupt:\n break\n except:\n print('Digite uma opção válida.'.center(150))\n\n\ndef alerta_erro(erro):\n return print(f'\\033[;31m{erro}\\033[m')\n\n\ndef menu_dados(*dados):\n opcoes = []\n for i in dados:\n pergunta = str(input(i))\n if pergunta.isnumeric():\n numero = int(pergunta)\n opcoes.append(numero)\n else:\n opcoes.append(pergunta)\n return opcoes\n\n\n","repo_name":"queelgomes/Cadastro_Consulta_Python","sub_path":"interfaces/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6880331674","text":"import os\nimport time\nimport openai\nimport json\n\nSEED = 0\nNUM_PROMPTS = 6\nROOT_PATH = os.path.dirname(os.path.abspath(__file__))\nMATH_RESULTS_PATH = os.path.join(ROOT_PATH, \"results\", \"gsm8k-cross\")\nSTRAT_RESULTS_PATH = os.path.join(ROOT_PATH, \"results\", \"strategyqa-cross\")\n\nfor diffifulty in [\"simple\", \"medium\", \"hard\"]:\n FILE_PATH = os.path.join(MATH_RESULTS_PATH, f\"results-{diffifulty}.jsonl\")\n MATH_FILE_PATH = os.path.join(\n MATH_RESULTS_PATH, f\"new-results-{diffifulty}.jsonl\")\n STRAT_FILE_PATH = os.path.join(\n STRAT_RESULTS_PATH, f\"new-results-{diffifulty}.jsonl\")\n\n old_file = list(open(FILE_PATH, \"r\"))\n math_file = open(MATH_FILE_PATH, \"w\")\n strat_file = open(STRAT_FILE_PATH, \"w\")\n\n for i, line in enumerate(old_file):\n if i <= 8100:\n math_file.write(line)\n else:\n strat_file.write(line)\n","repo_name":"raychungno1/CSE5525","sub_path":"project/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69878006134","text":"from django.urls import path\nfrom agriculture.views import *\n\n\napp_name = 'agriculture'\n\nurlpatterns = [\n path('', agriculture_home, name=\"agriculture_home\"),\n # path('admin/', admin_view, name=\"admin_view\"),\n path('delete//', delete, name='delete'),\n path('json/', show_json, name='show_json'),\n path('add/', add_item, name='add_task'),\n path('single//', single_view, 
name='single_view')\n]","repo_name":"danielcm585/sCity","sub_path":"agriculture/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"30019590617","text":"from variables import a, b, c, d, e, f, g, h, i, j\n\nwhile True:\n inp = input(\"\\nPlease enter any number in the range from 0 to 10 (enter 0 to exit): \")\n\n if inp == \"0\":\n print(\"Exiting the program.\")\n break\n\n elif int(inp) in range(1, 11):\n k = {\"1\": a, \"2\": b, \"3\": c,\n \"4\": d, \"5\": e, \"6\": f,\n \"7\": g, \"8\": h, \"9\": i, \"10\": j}\n print(k[f\"{inp}\"])\n\n else:\n print(\"\\nYour input isn't correct. Please enter a number between 0 and 10.\")\n","repo_name":"MaxKosPy/Step_Homeworks","sub_path":"hw_6/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31388706641","text":"from flask import request\nfrom flask.wrappers import Response\nfrom tempfile import NamedTemporaryFile\nfrom subprocess import Popen\nimport json\n\nfrom server.resources.api import bp\nimport server.curator\nimport server.trainer\n\n\n@bp.route(\"/helloWorld\", methods=[\"GET\"])\ndef imAlive():\n return Response(\"Hello World!\", 200)\n\n# username: username of user\n# model_name: name of the model to be created\n@bp.route(\"/create_MKO\", methods=[\"POST\"])\ndef create_MKO():\n try:\n data = request.data\n data = json.loads(data)['data']\n model_name = data.get('model_name')\n username = data.get('username')\n claim_check = server.curator.create_mko(model_name, username)\n except Exception as err:\n print(\"Bad request: \",request.data)\n return Response(\"Badly formed request: {}\".format(err), 400)\n\n return ({\"claim_check\": claim_check}, 200)\n\n@bp.route(\"/fill_mko\", methods=[\"POST\"])\ndef fill_data():\n try:\n data = request.data\n data = json.loads(data)['data']\n username = data.get(\"username\")\n model_name = data.get(\"model_name\")\n dataspec = data.get('dataspec')\n topology = data.get(\"topology\", list([]))\n hypers = data.get('hyper_parameters', {})\n mko = data.get('mkodata')\n claim_check = server.curator.fill_mko(username, model_name, mko, dataspec, topology, hypers)\n except Exception as err:\n return Response(\"Badly formed request: {}\".format(err), 400)\n\n return ({\"claim_check\": claim_check}, 200)\n\n\n\n@bp.route(\"/describe_mko\", methods=[\"POST\"])\ndef describe_mko():\n try:\n data = request.data\n data = json.loads(data)['data']\n mko = data.get('mkodata')\n return server.curator.describe_mko(mko)\n except Exception as err:\n return Response(\"Badly formed request: {}\".format(err), 400)\n\n\n@bp.route(\"/train\", methods=[\"POST\"])\ndef train():\n try:\n data = request.data\n data = json.loads(data)['data']\n username = data.get(\"username\")\n model_name = data.get(\"model_name\")\n mko = data.get('mkodata')\n smip_token = data.get('smip_auth').get('token')\n smip_url = data.get('smip_auth').get('url')\n claim_check = server.trainer.train_mko(username, model_name, mko, smip_token, smip_url)\n except Exception as err:\n return Response(\"Badly formed request: {}\".format(err), 400)\n\n return ({\"claim_check\": claim_check}, 200)\n\n\n@bp.route(\"/trainCalibrate\", methods=[\"POST\"])\ndef train_calibrate():\n try:\n data = request.data\n data = json.loads(data)['data']\n username = data.get(\"username\")\n model_name = 
data.get(\"model_name\")\n mko = data.get('mkodata')\n smip_token = data.get('smip_auth').get('token')\n smip_url = data.get('smip_auth').get('url')\n calibration_point = data.get('calibration_point',\"\")\n desired_mu = data.get('desired_mu', 1.0)\n index = int(data.get('index', 0))\n claim_check = server.trainer.train_mko(username, model_name, mko, smip_token, smip_url,\n autocalibrate=True, calibration_point=calibration_point, desired_mu=desired_mu, index=index)\n except Exception as err:\n return Response(\"Badly formed request: {}\".format(err), 400)\n\n return ({\"claim_check\": claim_check}, 200)","repo_name":"SCOREC/BDA_SM","sub_path":"src/training_manager/server/resources/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20035860149","text":"from __future__ import absolute_import, unicode_literals\nimport json\nimport requests\nimport time\nimport sys\n\nfrom textcelery.tasks import send_text\nfrom app.models import Alert\n\nfrom web3 import Web3\n\nfrom etheralert.settings import ETHERSCAN_API_KEY\n\nURL_ETHERSCAN_API = 'https://api.etherscan.io/api?apikey=' + ETHERSCAN_API_KEY + '&module=proxy&action='\n\n\ndef search_block_for_receiver_addresses(block):\n #Alert.objects.filter(address=tx_address)\n for entry in Alert.objects.all():\n for tx in block['transactions']:\n if tx['to'] is None:\n # case when contract creation is transactino there is no 'to'\n continue\n elif tx['to'].lower() == entry.address.lower():\n print(' sending text to', entry.phone_number, 'for', entry.address)\n tx = convert_tx_hex_to_decimal(tx)\n send_text.apply_async((entry, tx))\n\ndef convert_tx_hex_to_decimal(tx):\n # change hex to int, wei to ETH, Decimal to string before texting\n tx['value'] = str(Web3.fromWei(int(tx['value'], 16), 'ether'))\n tx['blockNumber'] = int(tx['blockNumber'], 16)\n return tx\n\ndef get_etherscan_block_tip_number_hex():\n ACTION = 'eth_blockNumber'\n try:\n r = requests.get(URL_ETHERSCAN_API + ACTION)\n blockNumber = json.loads(r.text)['result']\n except:\n print('api error')\n return False\n # etherscan numbers are hexadecimal. 
we want decimal.\n return int(blockNumber, 16)\n\ndef get_etherscan_block(blockNumber):\n ACTION = 'eth_getBlockByNumber&boolean=true&tag='\n try:\n r = requests.get(URL_ETHERSCAN_API + ACTION + blockNumber)\n block = json.loads(r.text)['result']\n except:\n print('api error')\n return False\n return block\n\n\nif __name__ == '__main__':\n print('running ethereum')\n firstRun = True\n lastBlockNumber = 0\n\n while True:\n blockNumber = get_etherscan_block_tip_number_hex()\n\n # only get next block if we are at least 2 behind tip (which isn't stable)\n if blockNumber > lastBlockNumber + 1:\n if not firstRun:\n block = get_etherscan_block(hex(lastBlockNumber + 1))\n else:\n block = get_etherscan_block(hex(blockNumber - 1))\n firstRun = False\n\n search_block_for_receiver_addresses(block)\n\n lastBlockNumber = int(block['number'], 16)\n print(lastBlockNumber, block['hash'])\n\n time.sleep(0.5)\n","repo_name":"bartleyg/etheralert","sub_path":"ethereum_block_watcher.py","file_name":"ethereum_block_watcher.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26011243220","text":"'''\r\nProblem Statement:\r\nWrite a Python program to calculate wind chill index.\r\n'''\r\n\r\n\r\n#getting input for temperatue, and wind speed.\r\nwind= float(input(\"Wind speed (at 10 meter height, in km/ hr):\"))\r\nair_temperature= float(input(\"Speed of Air temperature (in degrees Celsius):\"))\r\n\r\n#calculate\r\nindex= 13.12 +(0.6215*air_temperature)-(11.37*(wind**(0.16)))+(0.3965*air_temperature*(wind**(0.16)))\r\n\r\n#output\r\nprint(f\"The wind chill index is {index}.\" )\r\n\r\n\r\n","repo_name":"ankitacoder3/Python-Basics","sub_path":"Python_Basics/Operators_Input_Library_Functions/Program11a.py","file_name":"Program11a.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42957328714","text":"import json\nimport random\nfrom ...game_interface import GameInterface, PlayerInterface, GameParameterInterface\nfrom enum import Enum\nimport attrs\nfrom importlib.resources import files\nfrom typing import List, Dict, Any\nfrom ...exceptions import PlaygroundInvalidActionException\n\nfrom nltk.corpus import wordnet as word_corpus_loader\nimport nltk\n\ntry:\n word_corpus_loader.ensure_loaded()\nexcept:\n nltk.download(\"wordnet\")\n\n# Just store corpus as a shared global variable for now\n# Important it's global, becausae we don't want a new\n# corpus per game\nword_corpus = set(word_corpus_loader.words())\n\n\n# TODO: Import these from\nclass Color(str, Enum):\n RED = \"RED\"\n BLUE = \"BLUE\"\n ASSASSIN = \"ASSASSIN\"\n INNOCENT = \"INNOCENT\"\n UNKNOWN = \"UNKNOWN\"\n\n\nclass PlayerType(str, Enum):\n GIVER = \"GIVER\"\n GUESSER = \"GUESSER\"\n\n\n@attrs.define\nclass CodenamesPlayer(PlayerInterface):\n color: Color = None\n type: PlayerType = None\n\n\n@attrs.define(frozen=True)\nclass CodenamesParameters(GameParameterInterface):\n num_players: int = 2\n pass\n\n\n@attrs.define()\nclass Team:\n giver: CodenamesPlayer\n guesser: CodenamesPlayer\n\n\n# TODO: Thread safety\n# DICTIONARY = enchant.Dict(\"en_US\")\n\nBOARD_SIZE = 25\nRED_CARDS = 9\nBLUE_CARDS = 8\n\ncard_list = None\n\n\ndef get_word_board():\n global card_list\n if card_list is None:\n card_list = (\n files(package=\"playgroundrl_envs.games.codenames\")\n .joinpath(\"wordlist.txt\")\n .read_text()\n ).split(\"\\n\")\n\n cards = 
random.sample(card_list, BOARD_SIZE)\n    cards = [c.strip() for c in cards]\n    return cards\n\n\ndef get_card_colors():\n    coordinates = list(range(BOARD_SIZE))\n    random.shuffle(coordinates)\n\n    colors = [Color.INNOCENT for _ in range(BOARD_SIZE)]\n\n    # Set\n    for i in coordinates[:RED_CARDS]:\n        colors[i] = Color.RED\n\n    for i in coordinates[RED_CARDS : RED_CARDS + BLUE_CARDS]:\n        colors[i] = Color.BLUE\n\n    i = coordinates[RED_CARDS + BLUE_CARDS]\n    colors[i] = Color.ASSASSIN\n    return colors\n\n\nclass CodenamesGame(GameInterface):\n    def __init__(\n        self,\n        game_id,\n        players,\n        game_type,\n        parameters: CodenamesParameters,\n        self_training=False,\n    ):\n        super().__init__(\n            game_id, parameters, players, game_type, self_training=self_training\n        )\n\n        self.reward = {player.player_id: 0 for player in players}\n\n        self.player_list = players\n        self.num_players = parameters.num_players\n\n        assert self.num_players == 4 or self.num_players == 2\n\n        self.players[0].color = Color.RED\n        self.players[1].color = Color.RED\n        if self.num_players == 4:\n            self.players[2].color = Color.BLUE\n            self.players[3].color = Color.BLUE\n\n        self.players[0].type = PlayerType.GIVER\n        self.players[1].type = PlayerType.GUESSER\n        if self.num_players == 4:\n            self.players[2].type = PlayerType.GIVER\n            self.players[3].type = PlayerType.GUESSER\n\n        self.words = get_word_board()\n        self.actual_colors = get_card_colors()\n        self.guessed_colors = [Color.UNKNOWN] * BOARD_SIZE\n        self.player_moving_idx = 0\n\n        self.last_clue = \"\"\n        self.last_count = 0\n        # Number guessed in a row\n        self.guessed_count = 0\n\n        self.winning_team = None\n\n        self.scores = {Color.RED: 0, Color.BLUE: 0}\n        self.rewards = {Color.RED: 0, Color.BLUE: 0}\n\n        self.is_game_over = False\n\n    @property\n    def player_moving(self) -> CodenamesPlayer:\n        return self.players[self.player_moving_idx]\n\n    def increment_turn(self) -> None:\n        self.guessed_count = 0\n        self.player_moving_idx = (self.player_moving_idx + 1) % self.num_players\n\n    def other_team(self, color: Color) -> Color:\n        return color.RED if color == color.BLUE else color.BLUE\n\n    def validate_guesses(self, guesses: List[int]) -> None:\n        seen = set()\n        for guess in guesses:\n            if type(guess) != int:\n                raise PlaygroundInvalidActionException(\"Guess was not an integer\")\n\n            if not -1 <= guess < BOARD_SIZE:\n                # Guess out of range\n                raise PlaygroundInvalidActionException(\n                    f\"Guess not in range (-1, {BOARD_SIZE})\"\n                )\n\n            if guess != -1:\n                if self.guessed_colors[guess] != Color.UNKNOWN:\n                    # Guessed an already guessed square\n                    raise PlaygroundInvalidActionException(\n                        \"Guessed a previously-guessed card\"\n                    )\n\n                if guess in seen:\n                    # Duplicate guess in list\n                    raise PlaygroundInvalidActionException(\"Duplicate guesses in list\")\n                seen.add(guess)\n\n    def handle_guesser_action(self, action: Dict[str, Any]) -> bool:\n        \"\"\"\n        Logic if we received action for guesser\n        \"\"\"\n        end_turn_automatically = False\n        player_color = self.player_moving.color\n\n        if \"guess\" in action:\n            # Single-guess mode\n            guesses = [action[\"guess\"]]\n        elif \"guesses\" in action:\n            # Multi-guess mode\n            end_turn_automatically = True\n            guesses = action[\"guesses\"]\n        else:\n            # Requires one of guess or guesses\n            raise PlaygroundInvalidActionException(\n                \"User must specify one of 'guess' or 'guesses' in socket request.\"\n            )\n\n        self.validate_guesses(guesses)\n\n        # RESET REWARD\n        self.reward[player_color] = 0\n        for guess_idx, guess in enumerate(guesses):\n            if guess == -1:\n                # -1, -1 represents finishing your turn\n                self.increment_turn()\n
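# --- Sanity-check sketch for get_card_colors above: shuffling the 25 indices
# and slicing yields exactly 9 red, 8 blue, 1 assassin and 7 innocent cards.
import random
from collections import Counter

BOARD_SIZE, RED_CARDS, BLUE_CARDS = 25, 9, 8
coords = list(range(BOARD_SIZE))
random.shuffle(coords)
colors = ["INNOCENT"] * BOARD_SIZE
for i in coords[:RED_CARDS]:
    colors[i] = "RED"
for i in coords[RED_CARDS:RED_CARDS + BLUE_CARDS]:
    colors[i] = "BLUE"
colors[coords[RED_CARDS + BLUE_CARDS]] = "ASSASSIN"
print(Counter(colors))  # RED: 9, BLUE: 8, INNOCENT: 7, ASSASSIN: 1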
break\n else:\n color = self.actual_colors[guess]\n # Update to signify it has been guessed\n self.guessed_colors[guess] = color\n\n if color == Color.ASSASSIN:\n self.is_game_over = True\n self.winning_team = self.other_team(self.player_moving.color)\n self.increment_turn()\n break\n else:\n if color != Color.INNOCENT:\n # Increase score for a team\n self.scores[color] += 1\n\n if color == player_color:\n self.guessed_count += 1\n self.reward[player_color] += 1\n\n if self.guessed_count >= self.last_count + 1:\n # We've guessed more than the count given\n self.increment_turn()\n break\n else:\n keep_guessing = True\n\n elif color == Color.INNOCENT:\n # Penalty for an innocent is -0.5\n self.reward[player_color] -= 0.5\n else:\n # Penalty for opposing team is -1\n self.reward[player_color] -= 1\n\n if color != player_color:\n # If we didn't guess correct,\n # it's the other team's turn\n self.guessed_count = 0\n self.increment_turn()\n break\n\n if (\n self.scores[color.BLUE] == BLUE_CARDS\n or self.scores[color.RED] == RED_CARDS\n ):\n # Stop guessing if game is over\n # It's a little bad we do this check twice\n break\n\n if guess_idx == len(guesses) - 1 and end_turn_automatically:\n # If we've already exhausted the guesses given to us,\n # we should go to the next turn automatically\n self.increment_turn()\n break\n\n # Check for game over conditions\n if self.scores[Color.BLUE] == BLUE_CARDS:\n self.is_game_over = True\n self.winning_team = Color.BLUE\n\n elif self.scores[Color.RED] == RED_CARDS:\n self.is_game_over = True\n self.winning_team = Color.RED\n\n return True\n\n def handle_giver_action(self, action: Dict[str, Any]) -> bool:\n \"\"\"\n Logic if we received action for spymaster player\n \"\"\"\n if \"word\" not in action or \"count\" not in action:\n raise PlaygroundInvalidActionException(\n 'Socket message must be in format \\{\"word\": str, \"count\": int \\}'\n )\n\n word = action[\"word\"].lower().strip()\n\n if \" \" in word:\n raise PlaygroundInvalidActionException(\"Clue must be only a single word\")\n\n if not word.isalpha():\n raise PlaygroundInvalidActionException(\"Word must consist of only letters\")\n\n if not word in word_corpus:\n raise PlaygroundInvalidActionException(\n \"Word not recognized (must belong to NLTK corpus)\"\n )\n\n # Can't use same word as on board\n for board_word in self.words:\n # Check if either is a substring of the other\n if board_word in word or word in board_word:\n raise PlaygroundInvalidActionException(\n \"Word cannot be a substring or superstring of any board word.\"\n )\n\n count = int(action[\"count\"])\n\n if count < 0 or count > 9:\n raise PlaygroundInvalidActionException(\"Count must be between 0 and 9\")\n\n self.last_clue = word\n self.last_count = count\n\n self.increment_turn()\n return True\n\n def submit_action(self, action, player_sid=\"\"):\n \"\"\"\n Callback to handle action\n \"\"\"\n if self.player_moving.sid != player_sid:\n raise PlaygroundInvalidActionException(\"Not your turn.\")\n\n # TODO: This isn't necessary\n action = json.loads(action)\n\n if self.player_moving.type == PlayerType.GIVER:\n return self.handle_giver_action(action)\n else:\n return self.handle_guesser_action(action)\n\n def get_state(self, player_sid=\"\", player_id=-1):\n # TODO: Smarter\n if player_id == -1:\n player_id = 0\n\n player = self.players[player_id]\n state = {\n \"player_moving\": self.player_moving.user_id,\n \"model_name\": self.player_moving.model_name,\n \"player_moving_id\": self.player_moving.player_id,\n \"color\": 
player.color,\n \"role\": player.type,\n \"words\": self.words,\n \"guessed\": self.guessed_colors,\n \"actual\": self.actual_colors\n if player.type == PlayerType.GIVER\n else self.guessed_colors,\n \"clue\": self.last_clue,\n \"count\": self.last_count,\n \"scores\": self.scores,\n }\n\n # Reward will be from previous round\n return json.dumps(state), self.reward[player_id]\n\n @staticmethod\n def get_game_name():\n return \"codenames\"\n\n @staticmethod\n def get_num_players():\n return 4\n\n def get_outcome(self, player_id):\n if not self.is_game_over:\n return None\n player = self.players[player_id]\n return 1 if self.winning_team == player.color else 0\n\n def get_is_game_over(self):\n return self.is_game_over\n\n def get_player_moving(self):\n return self.player_moving\n","repo_name":"rayankrish/playground_environments","sub_path":"src/playgroundrl_envs/games/codenames/codenames.py","file_name":"codenames.py","file_ext":"py","file_size_in_byte":11534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3849538435","text":"from handleData import *\nfrom pipe import *\n\n#Feature Importance#\n#Feature Importance#\nos.chdir('..')\nos.chdir('..')\nos.chdir('..')\nos.chdir('Data')\nfilename = 'micro_world.csv'\ndata = readcsv(filename)\ndata = replace_value(data,['regionwb'],np.NaN,'Non_OECD_Rich')\ndata = replace_value(data, list(data), np.NaN, 0)\ndata = get_q_24(data)\ny_name = 'q24'\n\nclusters = readcsv('sample_country_clusters.csv',0)\nclusters.columns = ['economy','cluster']\ndata = pd.merge(data,clusters,'left','economy')\nbad_inds = [0,2,4,5,48]\n\n\n#index 0 = economy\n#index 4 = random id\n#index 5 = weighting \n#index 48 = follow-up on target\nx_names = [i for j, i in enumerate(list(data)) if j not in bad_inds]\nx_names.remove(y_name)\ny = data[y_name]\nx = data[x_names]\n\nnew_names = list(x)\nnew_names.remove('pop_adult')\nnew_names.remove('age')\nx = create_dummies(x, new_names)\n\n#identify_important_features(x,y, 10, True,'first_pass')\n\n#weights.to_csv(\"weights.csv\",header=True)\n\nx.to_csv(\"x_clusterpass.csv\")\n#y.to_csv(\"y.csv\",header = True)\n","repo_name":"abhig94/CAPP-30254","sub_path":"Project/Pipeline/cluster_pass.py","file_name":"cluster_pass.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42955580040","text":"import socket\n\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nifopen = client.connect_ex(('127.0.0.1', 2900))\n\nif ifopen == 0:\n print('connected')\n message1 = '1'\n message2 = '2'\n client.send(message1.encode('utf-8'))\n client.send(\"-\".encode('utf-8'))\n client.send(message2.encode('utf-8'))\n data = b''\n while len(data) < 20:\n try:\n packet = client.recv(20 - len(data))\n if not packet:\n break\n data += packet\n except:\n break\n if len(data) == 20:\n print('received message:', data.decode('utf-8'))\n else:\n print('error: received message has incorrect length')\nelse:\n print(\"connection lost\")\nclient.close()\n","repo_name":"AWadowski/pas","sub_path":"lb4/z8.py","file_name":"z8.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8938816508","text":"from django.contrib.auth.forms import PasswordChangeForm\nfrom .forms import *\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import *\nfrom 
django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.views.generic import DetailView, CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import JsonResponse\nfrom django.conf import settings\nimport itertools\nimport functools\n\n\ndef get_citizen_group():\n citizen_group, created = Group.objects.get_or_create(name='Citizen')\n return citizen_group\n\n\ndef get_organizer_group():\n organizer_group, created2 = Group.objects.get_or_create(name='Organizer')\n return organizer_group\n\n\ndef home(request):\n return render(request, \"meetup/home.html\", {\"title\": \"Home\"})\n\n\ndef about(request):\n return render(request, \"meetup/about.html\", {\"title\": \"About\"})\n\n\ndef register_initial(request):\n if request.user.is_authenticated:\n return redirect('meetup-home')\n return render(request, \"meetup/register_initial.html\",\n {\"title\": \"Register\"})\n\n\ndef register_citizen(request):\n if request.user.is_authenticated:\n return redirect('meetup-home')\n if request.method == 'POST': # If form was filled out and submitted\n form = RegisterCitizenForm(request.POST)\n if form.is_valid():\n form.save() # Creates User. User's Profile and Wallet created\n # automatically\n name = form.cleaned_data.get('username')\n user = User.objects.filter(username=name).first()\n user.groups.add(\n get_citizen_group()) # Assigns User to Citizen group\n\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Hey {username}, welcome to MeetUp!\")\n return redirect(\"meetup-login\")\n else: # If form not submitted just show form for user to fill out\n form = RegisterCitizenForm()\n return render(request, 'meetup/register.html', {'form': form,\n \"title\": \"Register\"})\n\n\ndef register_organizer(request):\n if request.user.is_authenticated:\n return redirect('meetup-home')\n if request.method == 'POST':\n form = RegisterOrganizerForm(request.POST)\n if form.is_valid():\n form.save()\n name = form.cleaned_data.get('username')\n user = User.objects.filter(username=name).first()\n user.groups.add(get_organizer_group())\n\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Hey {username}, welcome to MeetUp!\")\n return redirect(\"meetup-login\")\n else:\n form = RegisterOrganizerForm()\n return render(request, 'meetup/register.html', {'form': form,\n \"title\": \"Register\"})\n\n\ndef register_admin(request):\n if request.user.is_authenticated:\n return redirect('meetup-home')\n if request.method == 'POST':\n form = RegisterAdminForm(request.POST)\n if form.is_valid():\n form.save()\n name = form.cleaned_data.get('username')\n user = User.objects.filter(username=name).first()\n user.is_staff = True\n user.is_superuser = True\n user.groups.add(get_citizen_group())\n user.groups.add(get_organizer_group())\n user.save() # For saving staff and superuser status\n return redirect(\"admin:index\")\n else:\n form = RegisterAdminForm()\n return render(request, 'meetup/register.html', {'form': form,\n \"title\": \"Register Admin\"})\n\n\ndef login_user(request):\n form = LoginForm()\n if request.user.is_authenticated:\n return redirect('meetup-home')\n else:\n if request.method == 'POST':\n form = LoginForm(request.POST)\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n 
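# Aside: this view builds a LoginForm but never calls form.is_valid(); the
# credentials are read straight from request.POST. A hedged sketch of the
# form-validated variant (it assumes LoginForm declares username and password
# fields - the actual form class is defined elsewhere in .forms):
from django.contrib.auth import authenticate

def _login_via_form(request, form_cls):
    form = form_cls(request.POST)
    if not form.is_valid():
        return None  # only touch cleaned_data after is_valid() has run
    # authenticate() returns the User on success and None on bad credentials.
    return authenticate(request,
                        username=form.cleaned_data["username"],
                        password=form.cleaned_data["password"])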
user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n messages.success(request,\n f\"You are now logged in. Welcome, \"\n f\"{username}!\")\n return redirect('meetup-home')\n else:\n messages.warning(request, 'Username OR password is incorrect')\n\n context = {'form': form, 'title': \"Login\"}\n return render(request, \"meetup/login.html\", context)\n\n\ndef contact(request):\n if request.method == \"POST\":\n contact = Contact()\n name = request.POST.get('name')\n email = request.POST.get('email')\n subject = request.POST.get('subject')\n contact.name = name\n contact.email = email\n contact.subject = subject\n contact.save()\n messages.success(request, f\"Thank you {contact.name}, \"\n f\"we will be contact with you shortly!\")\n return redirect('meetup-home')\n\n return render(request, 'meetup/contact.html')\n\n\ndef logout_user(request):\n logout(request)\n return redirect('meetup-login')\n\n\n@login_required(login_url='meetup-login')\ndef profile(request):\n list_cc = list(CreditCard.objects.filter(user=request.user).values_list(\n 'credit_card_number', flat=True))\n # Conceal digits except for last 4\n res_list = []\n for cc_num in list_cc:\n stringified = str(cc_num)\n test_list = range(0, len(stringified) - 4)\n repl_char = '*'\n temp = list(stringified)\n res = [repl_char if idx in test_list else ele for idx, ele in\n enumerate(temp)]\n res = ''.join(res)\n res_list.append(res)\n context = {'title': f\"{request.user}'s Profile\",\n 'robucks': Wallet.objects.get(user=request.user).balance,\n 'counter': functools.partial(next, itertools.count()),\n 'numbers': res_list,\n 'credit_cards': CreditCard.objects.filter(user=request.user)}\n\n if request.user.groups.filter(name='Citizen').exists():\n context['events'] = Event.objects.filter(attendees=request.user)\n else:\n context['events'] = Event.objects.filter(user=request.user)\n\n return render(request, \"meetup/profile.html\", context)\n\n\n@login_required(login_url='meetup-login')\ndef profile_update(request):\n if request.method == \"POST\":\n if request.user.groups.filter(name='Citizen').exists() and \\\n request.user.groups.filter(name='Organizer').exists():\n u_form = UpdateAdminForm(request.POST, instance=request.user)\n elif request.user.groups.filter(name='Citizen').exists():\n u_form = UpdateCitizenForm(request.POST, instance=request.user)\n else:\n u_form = UpdateOrganizerForm(request.POST, instance=request.user)\n\n p_form = UpdateProfileForm(request.POST,\n request.FILES,\n instance=request.user.profile)\n\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request, f\"Your account has been updated!\")\n return redirect(\"meetup-profile\")\n else:\n if request.user.groups.filter(name='Citizen').exists() and \\\n request.user.groups.filter(name='Organizer').exists():\n u_form = UpdateAdminForm(instance=request.user)\n elif request.user.groups.filter(name='Citizen').exists():\n u_form = UpdateCitizenForm(instance=request.user)\n else:\n u_form = UpdateOrganizerForm(instance=request.user)\n\n p_form = UpdateProfileForm(instance=request.user.profile)\n\n context = {'title': f\"{request.user.username}'s Profile\",\n \"u_form\": u_form,\n \"p_form\": p_form\n }\n return render(request, \"meetup/profile_update.html\", context)\n\n\n@login_required(login_url='meetup-login')\ndef password_change(request):\n if request.method == 'POST':\n form = PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n form.save()\n 
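# Aside: form.save() above re-hashes the password, which normally invalidates
# the current session; this view copes by redirecting to the login page. If
# the user should instead stay signed in, Django's stock helper for that is
# update_session_auth_hash - a minimal sketch:
from django.contrib.auth import update_session_auth_hash

def _change_password_keep_session(request, form):
    # Assumes form is a bound, valid PasswordChangeForm.
    user = form.save()
    update_session_auth_hash(request, user)  # re-sync the session hash
    return user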
messages.success(request,\n 'Your password was successfully changed!')\n return redirect('meetup-login')\n else:\n messages.error(request, 'Please correct the error below.')\n else:\n form = PasswordChangeForm(request.user)\n\n context = {'title': f\"Change Password\", \"form\": form}\n return render(request, 'meetup/password_change.html', context)\n\n\n@login_required(login_url='meetup-login')\ndef credit_card(request):\n form = CreditCardForm()\n\n if request.method == 'POST':\n form = CreditCardForm(request.POST)\n if form.is_valid():\n card = form.save(commit=False)\n card.user = request.user\n card.save()\n messages.success(request, \"Credit card added!\")\n return redirect('meetup-profile')\n\n context = {'form': form, 'title': \"Add Credit Card\"}\n return render(request, \"meetup/credit_card.html\", context)\n\n\n@login_required(login_url='meetup-login')\ndef wallet(request):\n user_wallet = Wallet.objects.get(user=request.user)\n context = {'robucks': user_wallet.balance}\n return render(request, \"meetup/wallet.html\", context)\n\n\n@login_required(login_url='meetup-login')\ndef payment(request):\n form = PaymentForm(user=request.user, initial={'amount': 0})\n\n if request.method == 'POST':\n form = PaymentForm(request.POST, user=request.user)\n if form.is_valid():\n user_wallet = Wallet.objects.get(user=request.user)\n amount = form.cleaned_data.get('amount')\n user_wallet.balance += amount\n user_wallet.save(update_fields=['balance'])\n messages.success(request, f\"{amount} Robucks added to wallet!\")\n return redirect('meetup-wallet')\n\n context = {'form': form, \"title\": \"Pay\"}\n return render(request, \"meetup/payment.html\", context)\n\n\ndef events(request):\n form = RegisterEventForm()\n\n if request.method == 'POST':\n form = RegisterEventForm(request.POST)\n if form.is_valid():\n if not request.user.is_authenticated:\n return redirect(\"meetup-register-initial\")\n else:\n event = form.cleaned_data.get('name').first()\n current = User.objects.filter(username=request.user).first()\n # Check if user is already registered for this event\n attendees = event.attendees.all()\n if attendees.filter(id=current.pk).exists():\n messages.warning(request, 'Already registered!')\n else:\n user_wallet = Wallet.objects.filter(user=current).first()\n # Check if user has sufficient funds\n if user_wallet.balance >= event.price:\n user_wallet.balance -= event.price\n user_wallet.save()\n people = event.attendees.all()\n organizer = event.user\n organizer_wallet = Wallet.objects.filter(user=organizer).first()\n organizer_wallet.balance += event.price\n organizer_wallet.save()\n event.attendees.set(people)\n event.attendees.add(current)\n messages.success(request, \"Registered for event!\")\n else:\n messages.warning(request, 'Not enough money!')\n return redirect(\"meetup-events\")\n\n context = {'form': form,\n \"title\": \"Explore Events\",\n \"api_key\": settings.GOOGLE_MAPS_API_KEY}\n return render(request, \"meetup/events.html\", context)\n\n\nclass EventDetailView(DetailView):\n model = Event\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['attendees_list'] = list(self.object.attendees.all())\n context['is_organizer'] = \\\n self.request.user.groups.all().filter(name='Organizer').exists()\n return context\n\n\nclass EventCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):\n model = Event\n fields = ['name', 'location', 'date', 'price', 'max_age', 'min_age',\n 'capacity', 'activity_type', 'description', 
'contact_info']\n\n def __init__(self):\n super().__init__()\n self.template_name_suffix = \"_create\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['api_key'] = settings.GOOGLE_MAPS_API_KEY\n return context\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)\n\n success_url = '/profile/'\n success_message = '%(name)s successfully created!'\n\n def get_success_message(self, cleaned_data):\n return self.success_message % dict(cleaned_data, name=self.object.name)\n\n\nclass EventUpdateView(SuccessMessageMixin, LoginRequiredMixin,\n UserPassesTestMixin, UpdateView):\n model = Event\n fields = ['name', 'location', 'date', 'price', 'max_age', 'min_age',\n 'capacity', 'activity_type', 'description', 'contact_info']\n\n def __init__(self):\n super().__init__()\n self.template_name_suffix = \"_create\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['api_key'] = settings.GOOGLE_MAPS_API_KEY\n return context\n\n def form_valid(self, form):\n return super().form_valid(form)\n\n def test_func(self):\n event = self.get_object()\n if self.request.user == event.user:\n return True\n return False\n\n success_url = '/profile/'\n success_message = '%(name)s successfully updated!'\n\n def get_success_message(self, cleaned_data):\n return self.success_message % dict(cleaned_data, name=self.object.name)\n\n\nclass EventDeleteView(SuccessMessageMixin, LoginRequiredMixin,\n UserPassesTestMixin, DeleteView):\n model = Event\n\n def test_func(self):\n event = self.get_object()\n if self.request.user == event.user:\n return True\n return False\n\n success_url = '/profile/'\n success_message = '%(name)s successfully deleted!'\n\n def delete(self, request, *args, **kwargs):\n event = self.get_object()\n messages.success(self.request, self.success_message % event.__dict__)\n return super(EventDeleteView, self).delete(request, *args, **kwargs)\n\n\ndef send_coords(request):\n if request.method == 'GET':\n data = list(Event.objects.all().values_list('location', 'name',\n 'user__first_name', 'date',\n 'price', 'max_age',\n 'min_age', 'capacity',\n 'activity_type',\n 'description',\n 'contact_info',\n 'attendees', 'id'))\n return JsonResponse(data, safe=False)\n","repo_name":"alvinjiang32/CS_160","sub_path":"meetup/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19394927380","text":"# -*- coding:UTF-8 -*-\n\"\"\"\n微博收藏夹图片爬虫\nhttps://www.weibo.com/\n@author: hikaru\nemail: hikaru870806@hotmail.com\n如有问题或建议请联系\n\"\"\"\nimport os\nimport re\nimport urllib.parse\nfrom pyquery import PyQuery as pq\nfrom common import *\nfrom project.weibo import weibo\n\n\n# 获取一页的收藏微博ge\ndef get_one_page_favorite(page_count):\n # https://www.weibo.com/fav?page=1\n favorite_pagination_url = \"https://www.weibo.com/fav\"\n query_data = {\"page\": page_count}\n cookies = {\"SUB\": weibo.COOKIES[\"SUB\"]}\n favorite_pagination_response = net.Request(favorite_pagination_url, method=\"GET\", fields=query_data, cookies=cookies)\n result = {\n \"blog_info_list\": [], # 所有微博信息\n \"delete_blog_id_list\": [], # 全部已删除的微博ID\n \"is_over\": False, # 是否最后一页收藏\n }\n if favorite_pagination_response.status != const.ResponseCode.SUCCEED:\n raise CrawlerException(crawler.request_failre(favorite_pagination_response.status))\n favorite_data_html = 
tool.find_sub_string(favorite_pagination_response.content, '\"ns\":\"pl.content.favoriteFeed.index\"', '\"})', const.IncludeStringMode.END)\n favorite_data_html = tool.find_sub_string(favorite_data_html, '\"html\":\"', '\"})')\n if not favorite_data_html:\n raise CrawlerException(\"页面截取收藏信息失败\\n\" + favorite_pagination_response.content)\n # 替换全部转义斜杠以及没有用的换行符等\n html_data = favorite_data_html.replace(r\"\\\\\", chr(1))\n for replace_string in [r\"\\n\", r\"\\r\", r\"\\t\", \"\\\\\"]:\n html_data = html_data.replace(replace_string, \"\")\n html_data = html_data.replace(chr(1), \"\\\\\")\n # 解析页面\n children_selector = pq(html_data).find(\"div.WB_feed\").children()\n if children_selector.length == 0:\n raise CrawlerException(\"匹配收藏信息失败\\n\" + favorite_data_html)\n if children_selector.length == 1:\n raise CrawlerException(\"没有收藏了\")\n # 解析日志id和图片地址\n for i in range(children_selector.length - 1):\n result_blog_info = {\n \"blog_id\": 0, # 日志id(mid)\n \"photo_url_list\": [], # 所有图片地址\n }\n feed_selector = children_selector.eq(i)\n # 已被删除的微博\n if not feed_selector.has_class(\"WB_feed_type\"):\n if feed_selector.attr(\"mid\"):\n result[\"delete_blog_id_list\"].append(feed_selector.attr(\"mid\"))\n elif feed_selector.find(\".WB_empty\").length == 1:\n result[\"delete_blog_id_list\"].append(feed_selector.find(\".WB_empty\").attr(\"mid\"))\n continue\n # 解析日志id\n blog_id = feed_selector.attr(\"mid\")\n if not tool.is_integer(blog_id):\n raise CrawlerException(\"收藏信息解析微博id失败\\n\" + feed_selector.html())\n result_blog_info[\"blog_id\"] = int(blog_id)\n # WB_text 微博文本\n # WB_media_wrap 微博媒体(图片)\n # .WB_feed_expand .WB_expand 转发的微博,下面同样包含WB_text、WB_media_wrap这些结构\n # 包含转发微博\n if feed_selector.find(\".WB_feed_expand .WB_expand\").length == 0:\n media_selector = feed_selector.find(\".WB_media_wrap\")\n else:\n media_selector = feed_selector.find(\".WB_feed_expand .WB_expand .WB_media_wrap\")\n # 如果存在媒体\n if media_selector.length == 1:\n thumb_photo_url_list = re.findall(r'', media_selector.html())\n if len(thumb_photo_url_list) > 0:\n photo_url_list = []\n for photo_url in thumb_photo_url_list:\n # https://wx3.sinaimg.cn/mw2000/e212e359gy1hdzbgobbpvj20lc0sgtce.jpg\n # ->\n # https://wx3.sinaimg.cn/large/e212e359gy1hdzbgobbpvj20lc0sgtce.jpg\n url_split_result = url.split_path(photo_url)\n url_split_result[0] = \"large\"\n url_split_result.insert(0, \"\")\n photo_url_list.append(urllib.parse.urljoin(photo_url, \"/\".join(url_split_result)))\n result_blog_info[\"photo_url_list\"] = photo_url_list\n if len(result_blog_info[\"photo_url_list\"]) > 0:\n result[\"blog_info_list\"].append(result_blog_info)\n # 最后一条feed是分页信息\n page_selector = children_selector.eq(children_selector.length - 1)\n # 判断是不是最后一页\n page_count_find = re.findall(r\"第(\\d*)页\", page_selector.html())\n if len(page_count_find) > 0:\n page_count_find = list(map(int, page_count_find))\n result[\"is_over\"] = page_count >= max(page_count_find)\n else:\n result[\"is_over\"] = True\n return result\n\n\ndef delete_favorite(blog_id):\n api_url = \" https://weibo.com/aj/fav/mblog/del?ajwvr=6\"\n post_data = {\n \"mid\": blog_id,\n \"location\": \"v6_fav\"\n }\n headers = {\n \"Origin\": \"https://weibo.com\",\n \"Referer\": \"https://weibo.com/fav\",\n }\n cookies = {\"SUB\": weibo.COOKIES[\"SUB\"]}\n api_response = net.Request(api_url, method=\"POST\", fields=post_data, cookies=cookies, headers=headers).enable_json_decode()\n if api_response.status != const.ResponseCode.SUCCEED:\n raise CrawlerException(crawler.request_failre(api_response.status))\n 
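# Aside: the thumbnail-to-original rewrite in get_one_page_favorite() above
# only swaps the first path segment ("mw2000" -> "large"). The same transform
# with nothing but the standard library, for illustration (the project's own
# url.split_path helper is not shown in this file):
from urllib.parse import urlsplit, urlunsplit

def _to_large(photo_url):
    # e.g. https://wx3.sinaimg.cn/mw2000/abc.jpg -> https://wx3.sinaimg.cn/large/abc.jpg
    parts = urlsplit(photo_url)
    segments = parts.path.lstrip("/").split("/")
    segments[0] = "large"  # replace the size segment with the full-size one
    return urlunsplit(parts._replace(path="/" + "/".join(segments)))

assert _to_large("https://wx3.sinaimg.cn/mw2000/e212.jpg") == "https://wx3.sinaimg.cn/large/e212.jpg"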
crawler.get_json_value(api_response.json_data, \"code\", type_check=int, value_check=100000)\n\n\nclass Favorite(crawler.Crawler):\n def __init__(self, **kwargs):\n # 设置APP目录\n crawler.PROJECT_APP_PATH = os.path.abspath(os.path.dirname(__file__))\n\n # 初始化参数\n sys_config = {\n const.SysConfigKey.DOWNLOAD_PHOTO: True,\n const.SysConfigKey.NOT_CHECK_SAVE_DATA: True,\n const.SysConfigKey.GET_COOKIE: (\"sina.com.cn\", \"login.sina.com.cn\"),\n }\n crawler.Crawler.__init__(self, sys_config, **kwargs)\n\n # 设置全局变量,供子线程调用\n weibo.COOKIES.update(self.cookie_value)\n\n # 检测登录状态\n if not weibo.check_login():\n # 如果没有获得登录相关的cookie,则模拟登录并更新cookie\n if weibo.init_session() and weibo.check_login():\n pass\n else:\n log.error(\"没有检测到登录信息\")\n tool.process_exit()\n\n def main(self):\n page_count = 1\n is_over = False\n while not is_over:\n favorite_pagination_description = f\"第{page_count}页收藏\"\n self.start_parse(favorite_pagination_description)\n try:\n favorite_pagination_response = get_one_page_favorite(page_count)\n except CrawlerException as e:\n log.error(e.http_error(favorite_pagination_description))\n raise\n self.parse_result(favorite_pagination_description + \"已删除微博\", favorite_pagination_response[\"delete_blog_id_list\"])\n\n for blog_id in favorite_pagination_response[\"delete_blog_id_list\"]:\n blog_description = f\"微博{blog_id}\"\n log.info(f\"开始删除 {blog_description}\")\n try:\n delete_favorite(blog_id)\n except CrawlerException as e:\n log.error(e.http_error(blog_description))\n raise\n log.info(f\"{blog_description} 删除成功\")\n\n self.parse_result(favorite_pagination_description, favorite_pagination_response[\"blog_info_list\"])\n\n for blog_info in favorite_pagination_response[\"blog_info_list\"]:\n blog_description = f\"微博{blog_info['blog_id']}\"\n self.start_parse(blog_description)\n self.parse_result(blog_description, blog_info[\"photo_url_list\"])\n\n photo_count = 1\n photo_path = os.path.join(self.photo_download_path, blog_info[\"blog_id\"])\n for photo_url in blog_info[\"photo_url_list\"]:\n photo_path = os.path.join(photo_path, f\"%02d.{url.get_file_ext(photo_url)}\" % photo_count)\n photo_description = f\"微博{blog_info['blog_id']}第{photo_count}张图片\"\n if self.download(photo_url, photo_path, photo_description, success_callback=self.download_success_callback):\n self.total_photo_count += 1\n photo_count += 1\n\n if favorite_pagination_response[\"is_over\"]:\n is_over = True\n else:\n page_count += 1\n\n self.end_message()\n\n def download_success_callback(self, photo_url, photo_path, photo_description, download_return):\n if weibo.check_photo_invalid(photo_path):\n path.delete_dir_or_file(photo_path)\n log.error(f\"{photo_description} {photo_url} 已被屏蔽,删除\")\n return False\n return True\n\n\nif __name__ == \"__main__\":\n Favorite().main()\n","repo_name":"yxw19870806/Py3Crawler","sub_path":"project/weibo/favorite.py","file_name":"favorite.py","file_ext":"py","file_size_in_byte":8981,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"38785230000","text":"from tkinter import *\nroot=Tk()\nroot.title(\"my frist program\")\nroot.geometry('350x200')\nleb1=Label(root, text=\"HI i am surajit\")\nleb1.grid()\n# function to display text when\n# button is clicked\ndef clicked():\n leb1.configure(text = \"I just got clicked\")\n# button widget with red color text\n# inside\nbtn=Button(root, text='cleck me', fg='red', command=clicked)\nbtn.grid(column=1, 
row=0)\nroot.mainloop()","repo_name":"surajit0609/tkinter","sub_path":"changingcode.py","file_name":"changingcode.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"41986669024","text":"from app.configs.database import db\nfrom app.models import Products, Cart, CartProducts\nfrom sqlalchemy import func\n\n\ndef retrieve_products_in_cart(cart_id, dict=True):\n    session = db.session\n    cart_products = (\n        session.query(\n            Products.id,\n            Products.name,\n            func.sum(Products.price).label(\"price\"),\n            func.count(Products.name).label(\"quantidade\"),\n            # Products.price,\n            Products.img,\n        )\n        .select_from(Cart)\n        .join(CartProducts)\n        .join(Products)\n        .filter(Cart.id == cart_id)\n        .group_by(Products.name, Products.price, Products.img, Products.id)\n        .all()\n    )\n\n    if dict:\n        print(\"TA CHEGANDO\")\n        return [product._asdict() for product in cart_products]\n\n    return cart_products\n","repo_name":"wagnerfabricio/capstone_grup2","sub_path":"app/services/cart_service.py","file_name":"cart_service.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"12467935107","text":"# GCD - greatest common divisor\r\ndef gcd(a, b):\r\n\tif b == 0:\r\n\t\treturn a\r\n\t\t# after the modulo, a % b is smaller, so it takes b's place on the next call\r\n\treturn gcd(b, a%b)\r\n\r\na = 17\r\nb = 19\r\nprint(gcd(a, b))","repo_name":"Parvez13/Placement_Assignment-Sohail_Parvez-","sub_path":"Pre_Placement/DSA_Lectures/Recursion_Lecture_2/question_2.py","file_name":"question_2.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20969303512","text":"class Solution:\n    # 53. Maximum Subarray\n    # Input: nums = [-2,1,-3,4,-1,2,1,-5,4]\n    # Output: 6\n    # Explanation: [4,-1,2,1] has the largest sum = 6.\n    # Input: nums = [1]\n    # Output: 1\n    # Input: nums = [5,4,-1,7,8]\n    # Output: 23\n    # https://leetcode.com/problems/maximum-subarray/\n    def maxSubArray(self, nums):\n        if not nums:\n            return 0\n        curSum = maxSum = nums[0]\n        for num in nums[1:]:\n            tempsum = curSum + num  # best subarray sum ending at this index\n            curSum = max(num, tempsum)  # keep the larger of extending the sum (negatives may hurt) or restarting at num\n            maxSum = max(maxSum, curSum)  # update the best sum seen so far\n\n        return maxSum\n\n\n    # 70. Climbing Stairs\n    # Input: n = 2\n    # Output: 2\n    # Explanation: There are two ways to climb to the top.\n    # 1. 1 step + 1 step\n    # 2. 2 steps\n    # Input: n = 3\n    # Output: 3\n    # Explanation: There are three ways to climb to the top.\n    # 1. 1 step + 1 step + 1 step\n    # 2. 1 step + 2 steps\n    # 3. 2 steps + 1 step\n\n\n    def climbStairs(self, n, d={1: 1, 2: 2}):\n        if n == 0:\n            return 0\n        if n not in d:\n            d[n] = self.climbStairs(n - 1, d) + self.climbStairs(n - 2, d)  # the memo dict is seeded with only 1 and 2\n        return d[n]  # return the memoized value\n\n\n    # 392. Is Subsequence\n    # https://leetcode.com/problems/is-subsequence/\n\n\n    def isSubsequence(self, s, t):\n        i = 0\n        j = 0\n        while (j < len(t) and i < len(s)):\n            if (s[i] == t[j]):  # on a match, move i and j together\n                i += 1\n                j += 1\n            else:\n                j += 1  # on a mismatch, move only j\n        if (i == len(s)):\n            return True\n        return False\n\n\n    # 62. Unique Paths\n    # Input: m = 3, n = 2\n    # Output: 3\n    # Explanation: From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:\n    # 1. Right -> Down -> Down\n    # 2. Down -> Down -> Right\n    # 3. 
Down -> Right -> Down\n\n\n def uniquePaths(self, m, n): # n为x轴 m为y轴\n dp = [[0 for _ in range(n)] for _ in range(m)]\n for c in range(n):\n dp[0][c] = 1\n\n for r in range(m):\n dp[r][0] = 1\n\n for r in range(1,m):\n for c in range(1,n):\n dp[r][c] = dp[r-1][c] + dp[r][c-1]\n return dp[-1][-1]\n\n\n # 64. Minimum Path Sum\n # Input: grid = [[1,3,1],[1,5,1],[4,2,1]]\n # Output: 7\n # Input: grid = [[1,2,3],[4,5,6]]\n # Output: 12\n # https://leetcode.com/problems/minimum-path-sum/\n\n def minPathSum(self, grid):\n m = len(grid)\n n = len(grid[0])\n\n for c in range(1, n): # 因为只能向下或往右将边缘的路径算好\n grid[0][c] += grid[0][c - 1]\n\n for r in range(1, m):\n grid[r][0] += grid[r - 1][0]\n\n for r in range(1, m):\n for c in range(1, n): # 将中间的填充好是上面或者右边过来\n grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])\n return grid[-1][-1]\n\n\n # 63. Unique Paths II\n # Input: obstacleGrid = [[0,0,0],[0,1,0],[0,0,0]]\n # Output: 2\n # Explanation: There is one obstacle in the middle of the 3x3 grid above.\n # There are two ways to reach the bottom-right corner:\n # 1. Right -> Right -> Down -> Down\n # 2. Down -> Down -> Right -> Right\n\n\n def uniquePathsWithObstacles(self, obstacleGrid):\n m = len(obstacleGrid)\n n = len(obstacleGrid[0])\n dp = [[0 for _ in range(n)] for _ in range(m)]\n\n for c in range(n):\n if obstacleGrid[0][c] == 1: break\n dp[0][c] = 1\n\n for r in range(m):\n if obstacleGrid[r][0] == 1: break\n dp[r][0] = 1\n\n for r in range(1,m):\n for c in range(1,n):\n if obstacleGrid[r][c]== 1:\n dp[r][c]=0\n else:\n dp[r][c] = dp[r-1][c] + dp[r][c-1]\n return dp[-1][-1]\n\n\n # 120. Triangle\n # Input: triangle = [[2],[3,4],[6,5,7],[4,1,8,3]]\n # Output: 11\n # Explanation: The triangle looks like:\n # 2\n # 3 4\n # 6 5 7\n # 4 1 8 3\n # The minimum path sum from top to bottom is 2 + 3 + 5 + 1 = 11 (underlined above).\n\n\n def minimumTotal(self, triangle):\n dp = [0]*(len(triangle)+1)\n\n for row in triangle[::-1]: #倒转过来从底下开始\n for i,n in enumerate(row): #访问index和item。因为是三角形长度会越来越短\n dp[i] = n + min(dp[i],dp[i+1]) #当前的点的值加上最小的\n\n return dp[0]\n\n # 279. Perfect Squares\n # Input: n = 12\n # Output: 3\n # Explanation: 12 = 4 + 4 + 4.\n # Input: n = 13\n # Output: 2\n # Explanation: 13 = 4 + 9.\n\n def numSquares(self, n):\n dp = [n] * (n+1)\n dp[0] = 0\n for target in range(1,n+1): # 从1开始一直算到target上\n for s in range (1,target+1):\n square = s*s # square\n if target - square < 0: # 如果小于0停止\n break\n dp[target]=min(dp[target],1+dp[target-square])\n return dp[n] #做coin changed problem\n\n\n # 139. Word Break\n # Input: s = \"leetcode\", wordDict = [\"leet\",\"code\"]\n # Output: true\n # Input: s = \"applepenapple\", wordDict = [\"apple\",\"pen\"]\n # Output: true\n # Input: s = \"catsandog\", wordDict = [\"cats\",\"dog\",\"sand\",\"and\",\"cat\"]\n # Output: false\n # https://leetcode.com/problems/word-break/\n\n\n\n def wordBreak(self, s, wordDict):\n dp = [False]*(len(s)+1)\n dp[len(s)] = True #最后一个肯定是true\n for i in range (len(s),-1,-1): #倒序\n for w in wordDict:\n if i+len(w) <= len(s) and s[i:i+len(w)] == w : #构造的长度得小于s的长度同时substring得等于w\n dp[i] = dp[i+len(w)] #将这个点开始的设置为true利用加长度\n if dp[i]: #不需要再找别的word\n break\n return dp[0]\n\n\n # 322. 
Coin Change\n # Input: coins = [1,2,5], amount = 11\n # Output: 3\n # Input: coins = [2], amount = 3\n # Output: -1\n def coinChange(self, coins, amount):\n dp = [amount+1]*(amount+1)\n dp[0]=0\n for a in range(1,amount+1):\n for c in coins: #遍历所有的硬币的大小\n if a - c >= 0: #如果是大于0可能就是一种接法\n dp[a] = min(dp[a],1+dp[a-c]) #更新��优解\n return dp[amount] if dp[amount]!= amount + 1 else -1\n\n # 72. Edit Distance\n # Given two strings word1 and word2, return the minimum number of\n # operations required to convert word1 to word2.\n # You have the following three operations permitted on a word:\n #\n # Insert a character\n # Delete a character\n # Replace a character\n\n # Input: word1 = \"horse\", word2 = \"ros\"\n # Output: 3\n # Explanation:\n # horse -> rorse (replace 'h' with 'r')\n # rorse -> rose (remove 'r')\n # rose -> ros (remove 'e')\n\n # https://leetcode.com/problems/edit-distance/\n\n\n\n def minDistance(self, word1, word2):\n dp = [[float(\"inf\")] * (len(word2)+1) for i in range(len(word1)+1)]\n\n for c in range(len(word2)+1):\n dp[len(word1)][c] = len(word2) - c #word1 = \"\"是空的到word2的距离就是加多少个词\n\n for r in range(len(word1)+1):\n dp[r][len(word2)] = len(word1) - r #word2 = \"\"是空的到word1的距离就是加多少个词\n\n for r in range(len(word1)-1,-1,-1):\n for c in range(len(word2)-1,-1,-1):\n if word1[r] == word2[c]: #如果两个值相同\n dp[r][c] = dp[r+1][c+1]\n else: # 1 more step delete insert replace\n dp[r][c] = 1+ min(dp[r+1][c],dp[r][c+1],dp[r+1][c+1])\n\n return dp[0][0]\n\n\n # 97. Interleaving String\n # Input: s1 = \"aabcc\", s2 = \"dbbca\", s3 = \"aadbbcbcac\"\n # Output: true\n # Input: s1 = \"aabcc\", s2 = \"dbbca\", s3 = \"aadbbbaccc\"\n # Output: false\n # https://leetcode.com/problems/interleaving-string/\n # s1 s2 到了最后就out of bound 就是base case\n def isInterleave(self, s1, s2, s3):\n dp = [[False]*(len(s2)+1) for _ in range(len(s1)+1)]\n dp[len(s1)][len(s2)]=True\n for i in range(len(s1),-1,-1):\n for j in range(len(s2),-1,-1):\n if i0 and c>0 :#保护edge case\n if matrix[r][c] == \"1\":\n dp[r][c] = 1 + min(dp[r][c-1],dp[r-1][c],dp[r-1][c-1])\n else: #匹配到最小的。如果是正方形的时三个方向都会是一样的值\n dp[r][c] = 0\n maxsquare = max(maxsquare,dp[r][c])\n return maxsquare * maxsquare\n\n\n\n # 198. House Robber\n # Input: nums = [1,2,3,1]\n # Output: 4\n # Input: nums = [2,7,9,3,1]\n # Output: 12\n # https://leetcode.com/problems/house-robber/\n\n\n def rob(self, nums):\n # [rob1,rob2,n,n+1,...]\n rob1,rob2 = 0,0 # max can rob for the pervous two house\n for n in nums: # current house that we are at / do not rob\n temp = max(n+rob1,rob2) # 比较大小当前抢的大还是跳过大\n rob1 = rob2 #rob1 移动到下一个点\n rob2 = temp #rob2 记录当前最大值\n return rob2\n\n # 213. House Robber II\n # Input: nums = [2,3,2]\n # Output: 3\n # Input: nums = [1,2,3,1]\n # Output: 4\n # https://leetcode.com/problems/house-robber-ii/\n\n\n\n def rob2(self,nums):\n return max(nums[0],self.rob(nums[1:]),self.rob(nums[:-1]))\n # 尾巴和头不能连接将问题分为两个subarray来看找最大的\n\n\n # 91. 
Decode Ways\n # Input: s = \"12\"\n # Output: 2\n # Explanation: \"12\" could be decoded as \"AB\" (1 2) or \"L\" (12).\n # Input: s = \"226\"\n # Output: 3\n # Explanation: \"226\" could be decoded as \"BZ\" (2 26), \"VF\" (22 6), or \"BBF\" (2 2 6).\n # Input: s = \"06\"\n # Output: 0\n # Explanation: \"06\" cannot be mapped to \"F\" because of the leading zero (\"6\" is different from \"06\").\n # https://leetcode.com/problems/decode-ways/\n\n def numDecodings(self, s):\n dp = {len(s):1} #base case原始���定是1\n for i in range(len(s)-1,-1,-1):\n if s[i] == \"0\": #如果第一个是0这个位置的组合是0\n dp[i] = 0\n else:\n dp[i]=dp[i+1] #数自己本身就是个组合所以跟随前面的累加\n\n if (i+1 numero:\n mayormenor = \"Es más grande. prueba de nuevo.\"\n if nalea < numero:\n mayormenor = \"Es más pequeño. prueba de nuevo.\"\n return f\"\"\"\n\n \n \n Juego Adivinación\n \n \n \n
 \n \n ¡Has Fallado!\n {nalea}\n {mayormenor}\n Llevas {intentos} intentos.\n \n Presione entrar para validar.\n \n\n \n \n \"\"\"\n    else:\n        return f\"\"\"\n \n \n \n Juego Adivinación\n \n \n \n ¡HAS ACERTADO!\n El número era {nalea}\n Has necesitado {intentos} intentos para acertarlo.\n \n
\n \n \n \"\"\"\n\n\n@app.route('/anagrama')\ndef anagrama():\n return render_template('anagrama1.html')\n\n\n@app.route('/anagrama/validar', methods=['POST'])\ndef validarAna():\n textoaPagWeb= request.form[\"textoa\"]\n textobPagWeb= request.form[\"textob\"]\n resultado=anagramas(textoaPagWeb, textobPagWeb)\n return render_template('anagrama2.html', resultado=resultado)\n\n\n@app.route('/areaperi')\ndef areaperi():\n return render_template('areaPeri1.html')\n\n\n@app.route('/areaperi/validar', methods=['POST'])\ndef validarAreaperi():\n radio = request.form[\"radio\"]\n resultado = CalcularAreaPer(radio)\n return render_template('areaPeri2.html', resultado=resultado)\n\n\n@app.route('/celFah')\ndef celFah():\n return render_template('celFah1.html')\n\n\n@app.route('/celFah/validar', methods=['POST'])\ndef validarCelFah():\n grado = request.form[\"grado\"]\n resultado = celAfah(grado)\n return render_template('celFah2.html', resultado=resultado)\n\n\n@app.route('/parImpar')\ndef parImp():\n return render_template('parImpar1.html')\n\n\n@app.route('/parImpar/validar', methods=['POST'])\ndef validarParImp():\n numero = request.form[\"num\"]\n resultado = parImpar(numero)\n return render_template('parImpar2.html', resultado=resultado)\n\n@app.route('/capicua')\ndef capi():\n return render_template('capicua1.html')\n\n\n@app.route('/capicua/validar', methods=['POST'])\ndef cvalidarCapi():\n numeroPagWeb= request.form[\"numero\"]\n resultado=capicua(numeroPagWeb)\n return render_template('capicua2.html', resultado=resultado)\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"xsha256/Docker","sub_path":"menu/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14575297248","text":"import psycopg2\r\n\r\nimport Environment\r\nimport utils\r\nimport utils_sec\r\nfrom sql_runner.parallel_runner.main import parallel_run\r\nfrom utils import get_cluster_databases\r\n\r\ndef max_labels(host, port, db, return_dict):\r\n try:\r\n conn = psycopg2.connect(\r\n host=host,\r\n port=port,\r\n database=db,\r\n user='postgres',\r\n password=utils_sec.password_from_file('postgres', host, port))\r\n except Exception as e:\r\n print(f'{port}|{db}: {e}')\r\n return\r\n cur = conn.cursor()\r\n try:\r\n cur.execute(\"\"\"SELECT\r\n max( \r\n CASE WHEN position(',' IN labels) > 0 THEN\r\n split_part(labels, ',', 2) \r\n ELSE labels\r\n END) labels\r\n FROM public.databasechangelog \r\n WHERE labels IS NOT NULL\"\"\")\r\n record = cur.fetchone()[0]\r\n except Exception as e:\r\n record = ' '\r\n conn.close()\r\n return_dict[f'{port}|{db}'] = record\r\n\r\n\r\nif __name__ == '__main__':\r\n env = 'test'\r\n databases = get_cluster_databases(env)[0:]\r\n #databases = Repository.get_db_names_by_group('JAKARTA')\r\n #databases = ['enforcement_detection']\r\n port = Environment.get_port_from_env_repo(env)\r\n ports = list(range(port, port+1))\r\n return_dict = parallel_run(ports, databases, max_labels)\r\n utils.print_one_result(return_dict, len(max(databases, key=len)) + 7)","repo_name":"pasztorb71/scripts","sub_path":"Berci/scripts/checks/max_labels_in_instance.py","file_name":"max_labels_in_instance.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13550907671","text":"import pandas as pd\n\nfcc_file = \"data/fcc2.csv\"\nacs_file = \"data/acs2.csv\"\noutfile = 
\"data/fcc_acc_merged.csv\"\n\nfcc = pd.read_csv(fcc_file)\nacs = pd.read_csv(acs_file)\ncol = list(fcc.columns)\ncol[0] = 'id'\nfcc.columns = col\n\nout = fcc.join(acs.set_index('id'), on='id', how=\"left\")\nout.to_csv(outfile)\n\n\n\n","repo_name":"jhatkins999/ITEP","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23816176413","text":"import logging\n\nfrom aiohttp.web import middleware, HTTPException\n\nimport settings\n\n\nclass Headers:\n \"\"\"Middleware that adds the Riot API Key to the request.\"\"\"\n\n def __init__(self):\n self.required_header = []\n self.logging = logging.getLogger(\"HeaderManager\")\n self.logging.propagate = False\n level = logging.INFO\n if settings.DEBUG:\n level = logging.DEBUG\n self.logging.setLevel(level)\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(\n logging.Formatter(\"%(asctime)s [HeaderManager] %(message)s\")\n )\n self.logging.addHandler(handler)\n self.logging.info(\"Initiated middleware.\")\n\n @middleware\n async def middleware(self, request, handler):\n \"\"\"Process the request.\n\n request: Add X-Riot-Token Header with the API Key.\n response: No changes.\n \"\"\"\n headers = dict(request.headers)\n headers.update({\"X-Riot-Token\": settings.API_KEY})\n url = str(request.url)\n request = request.clone(headers=headers, rel_url=url.replace(\"http:\", \"https:\"))\n return await handler(request)\n\n\nclass ServerCheck:\n \"\"\"Middleware that makes sure the request is aimed at the proper server.\"\"\"\n\n def __init__(self):\n self.required_header = []\n self.legit_server = [\n \"br1\",\n \"eun1\",\n \"euw1\",\n \"jp1\",\n \"kr\",\n \"la1\",\n \"la2\",\n \"na1\",\n \"oc1\",\n \"tr1\",\n \"ru\",\n \"americas\",\n \"asia\",\n \"europe\",\n ]\n self.logging = logging.getLogger(\"ServerCheck\")\n self.logging.propagate = False\n level = logging.INFO\n if settings.DEBUG:\n level = logging.DEBUG\n self.logging.setLevel(level)\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(logging.Formatter(\"%(asctime)s [ServerCheck] %(message)s\"))\n self.logging.addHandler(handler)\n self.logging.info(\"Initiated middleware.\")\n\n @middleware\n async def middleware(self, request, handler):\n \"\"\"Process the request.\n\n request: Check if correct server\n response: No changes.\n \"\"\"\n server = request.rel_url.__str__().split(\"http://\")[1].split(\".\")[0]\n if server not in self.legit_server:\n self.logging.info(\"Illegal Server.\")\n raise HTTPException\n return await handler(request)\n","repo_name":"DoctressWasTaken/Lightshield_proxy","sub_path":"middleware/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25083005017","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass EstateProperty(models.Model):\n ######################\n # Private attributes #\n ######################\n _inherit = \"estate.property\"\n\n ###################\n # Default methods #\n ###################\n\n ######################\n # Fields declaration #\n ######################\n\n ##############################\n # Compute and search methods #\n ##############################\n\n ############################\n # Constrains and onchanges #\n ############################\n\n #########################\n # 
CRUD method overrides #\n #########################\n def sell_a_property(self):\n for property in self:\n winning_offer = property.offer_ids.search(\n [(\"status\", \"=\", \"accepted\"), (\"property_id\", \"=\", property.id)])\n invoice_dict = {\n \"move_type\": \"out_invoice\",\n \"partner_id\": winning_offer.partner_id.id,\n \"invoice_line_ids\": [\n (0, 0, {\n \"name\": property.name,\n \"quantity\": 1,\n \"price_unit\": winning_offer.price * 0.06,\n }),\n (0, 0, {\n \"name\": \"Agency Fees\",\n \"quantity\": 1,\n \"price_unit\": 100,\n })\n ],\n }\n self.env[\"account.move\"].create(invoice_dict)\n\n return super().sell_a_property()\n\n ##################\n # Action methods #\n ##################\n\n ####################\n # Business methods #\n ####################\n","repo_name":"mikhail-trunks-silao/odoo16_personal_projects","sub_path":"estate_account/models/estate_property.py","file_name":"estate_property.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37823563069","text":"n = int(input())\narray = list(map(int,input().strip().split(' ')))\nday,month = map(int,input().split())\nlength = len(array)\ncounter = summation = 0\n\nfor n in range(length):\n for m in range(month):\n if n+m >= length:\n break\n else:\n summation += array[n+m]\n if summation == day:\n counter += 1\n summation = 0\nprint(counter)\n","repo_name":"shahedex/Hackerrank-Practices","sub_path":"Algorithms/Implementation/birthdaychocolate.py","file_name":"birthdaychocolate.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"353719174","text":"\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--language\", dest=\"language\", type=str)\nimport random\n\n\nargs=parser.parse_args()\nprint(args)\n\n\n\n\n\nfrom acqdivReader import AcqdivReader, AcqdivReaderPartition\n\nacqdivCorpusReader = AcqdivReader(args.language)\n\n\nimport syllabificationJapanese\n\nsyllables = {}\t\n\nfor chunk in acqdivCorpusReader.iterator():\n# print(len(chunk))\n\n # tokenize chunk into valid syllables\n words = chunk.split(\" \")\n for word in words:\n if word == \"\\n\":\n continue \n if len(word) == 0:\n continue\n if word == \"n\":\n continue\n if word == \"???\":\n continue\n syllabification = syllabificationJapanese.syllabify(word)\n if syllabification is None:\n continue\n for syll in syllabification:\n if syll[0] is None:\n continue\n syll = syll[0]+syll[1]+syll[2]\n syllables[syll] = syllables.get(syll, 0) + 1\n\nprint(syllables)\n","repo_name":"m-hahn/probing-char-lms","sub_path":"prepareJapaneseSyllableList.py","file_name":"prepareJapaneseSyllableList.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10717541526","text":"# -*- coding:utf-8 -*-\nfrom flask import render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask\nfrom PIL import Image\nimport settings as st\nimport pymysql\nimport process\nimport datetime\nimport time\nimport os\n\napp = Flask(__name__)\npymysql.install_as_MySQLdb() # 链接 mysql 数据库\napp.config.from_object(st.BasicConfig) # 导入基本配置类\ndb = SQLAlchemy(app)\n\n\nclass History(db.Model):\n \"\"\"\n 创建用户历史记录表,有六个字段,存储用户的注册信息和提交记录\n \"\"\"\n __tablename__ = 'users_history'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True) # 生成主码 
id\n name = db.Column(db.String(64), index=True) # 存储用户名\n pwd = db.Column(db.String(64)) # 存储用户密码\n history = db.Column(db.String(255)) # 存储历史结果的 url\n date = db.Column(db.String(30)) # 以字符串形式存储时间,精确到秒\n accuracy = db.Column(db.Float) # 存储正确率\n\n\n@app.route('/')\ndef home():\n \"\"\"\n 进入客户端主页面\n \"\"\"\n return render_template('home_page.html')\n\n\n@app.route('/')\ndef user_home(username):\n \"\"\"\n 进入用户登录状态下的客户端主页面\n \"\"\"\n return render_template('home_page.html', is_success='pass', username=username)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"\n 处理登录表单\n \"\"\"\n if request.method == 'POST':\n # 验证用户信息\n user_info = request.form.to_dict()\n name = user_info.get('username')\n query_out = History.query.filter_by(name=name, pwd=user_info.get('password')).all()\n if len(query_out) is not 0:\n return render_template('home_page.html', is_success='pass', username=name)\n else:\n return render_template('home_page.html', is_success='refuse')\n\n return render_template('home_page.html')\n\n\n@app.route('/apply', methods=['GET', 'POST'])\ndef product():\n \"\"\"\n 接收图片处理请求,此时为未登录状态\n \"\"\"\n if request.method == 'POST':\n # 接收上传的图片并验证后缀是否符合规范\n uploadFile = request.files['img']\n fileName = uploadFile.filename\n extName = os.path.splitext(fileName)[1]\n\n if extName in app.config['ALLOWED_EXTENSIONS']:\n uploadFile.save(st.IMAGE_HOME + os.sep + st.TARGET_IMAGE_NAME)\n accuracy = process.ImageProcess().getResult() # 进行图片处理\n return render_template('result_page.html', msg='正确率:' + str(accuracy))\n else:\n return render_template('apply_page.html', msg='文件类型错误, 希望接受到(.bmp |.png |.jpg)文件')\n\n else:\n return render_template('apply_page.html')\n\n\n@app.route('/apply/', methods=['GET', 'POST'])\ndef user_product(username):\n \"\"\"\n 登录状态下的上传图片处理\n \"\"\"\n if request.method == 'POST':\n print(\"enter\")\n\n uploadFile = request.files['img']\n fileName = uploadFile.filename\n extName = os.path.splitext(fileName)[1]\n\n if extName in app.config['ALLOWED_EXTENSIONS']:\n\n uploadFile.save(st.IMAGE_HOME + os.sep + st.TARGET_IMAGE_NAME)\n accuracy = process.ImageProcess().getResult()\n\n # 保存图片至数据库,文件名由时间和用户名组成\n query = History.query.filter_by(name=username).first()\n pwd = query.pwd\n now_time = time.localtime(time.time())\n now = time.strftime(\"%Y-%m-%d_%H_%M_%S\", now_time)\n history_path = st.HISTORY_HOME + now + f'_{username}.png'\n # 将图片保存到历史文件夹下面\n Image.open(st.IMAGE_HOME + os.sep + st.RESULT_IMAGE_NAME).save(history_path)\n recording = History(name=username, pwd=pwd, history=history_path, date=now, accuracy=accuracy)\n # 提交数据\n db.session.add(recording)\n db.session.commit()\n return render_template('result_page.html', msg='正确率:' + str(accuracy), username=username)\n else:\n return render_template('apply_page.html', msg='文件类型错误, 希望接受到(.bmp |.png |.jpg)文件', username=username)\n\n else:\n return render_template('apply_page.html', username=username)\n\n\n@app.route('/history/')\ndef history(username):\n \"\"\"\n 响应历史记录页面,将在数据库中查询到的数据发送到客户端\n \"\"\"\n # 得到所有历史记录\n search_result = History.query.filter_by(name=username).all()\n result = []\n for i in search_result:\n if i.history is not None:\n # 转化为时间类\n date_time = datetime.datetime.strptime(i.date, \"%Y-%m-%d_%H_%M_%S\")\n result.append((i.history, date_time, i.accuracy))\n # 结果按时间排序\n result.sort(key=lambda x: x[1])\n if len(result) is not 0:\n return render_template('history_page.html', result=result, username=username)\n else:\n return render_template('history_page.html', result='没有检测到历史记录!', 
username=username)\n\n\nif __name__ == '__main__':\n # 每次用户进入都会重新生成表\n db.drop_all()\n db.create_all()\n\n # 因为暂时不考虑注册功能,生成一位虚拟用户\n sign_one = History(name='张三', pwd='123')\n db.session.add(sign_one)\n db.session.commit()\n app.run()\n","repo_name":"molujia/cab","sub_path":"mlp2/com/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29578029986","text":"from services import DatabaseService\ndb = DatabaseService()\n\ndef add_contact():\n name = input(\"Enter Name: \")\n mobile_no = input(\"Enter Mobile No.: \")\n if db.add_contact(name,mobile_no) == -1:\n print(\"Invalid Input\")\n\ndef add_address():\n name = input(\"Enter stored contact name: \")\n city = input(\"Enter city: \")\n pin = input(\"Enter pincode: \")\n if db.add_address(name, city, pin):\n print(\"Invalid Input\")\n\ndef modify_contact():\n current_name = input(\"Enter stored contact name: \") \n name = input(\"Enter new name(- to keep same): \")\n mobile_no = input(\"Enter new mobile no.(- to keep same): \")\n data = {}\n if name != '-':\n data['CNAME'] = name\n if mobile_no != '-':\n data['MOBILE_NO'] = mobile_no\n if db.modify_contact(current_name, data):\n print(\"Invalid Input\")\n\ndef remove_contact():\n name = input(\"Enter stored contact name:\")\n if db.remove_contact(name):\n print(\"Invalid Input\")\n\ndef show_contacts():\n contacts = db.get_all_contacts()\n count = 0\n name = None\n for i, contact in enumerate(contacts):\n if(contact[0] != name):\n count += 1\n name = contact[0]\n print(\"Contact\", count)\n print(\"\\tName:\", contact[0])\n print(\"\\tMobile No.:\", contact[1])\n if contact[2] != None:\n print(\"\\tAddress:\\n\\tCity: \", contact[2], \"\\tPincode: \", contact[3])\n\ndef contact_app():\n while(True):\n print(\"1. Add Contact\\n2. Modify Contact\\n3. Remove Contact\\n4. Show Contacts\\n5. Add Address\\n6. 
Exit\\n:\", end=\"\")\n c = int(input())\n if c == 1:\n add_contact()\n elif c == 2:\n modify_contact()\n elif c == 3:\n remove_contact()\n elif c == 4:\n show_contacts()\n elif c == 5:\n add_address()\n else:\n break\n\nif __name__ == \"__main__\":\n contact_app()","repo_name":"Ay9040/SwabhavTechInternship","sub_path":"Session06/contacts_app_part2/contact_ui.py","file_name":"contact_ui.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31599468141","text":"import numpy as np\nimport cv2\nimport torch\nimport pycocotools.mask as mask_util\nfrom PIL import Image\n\n\ndef get_mask_features(image, mask, model, preprocess=None):\n \"\"\"\n Computes the features of the mask portion of an image.\n\n Args:\n image (ndarray): The input image.\n mask (ndarray): The mask for cropping the image.\n model: The CLIP model used for encoding image features.\n\n Returns:\n mask_features (Tensor): The features of the mask portion of the image.\n \"\"\"\n # Apply the mask to the image\n masked_image = image.copy()\n masked_image[~mask] = 0\n\n # Preprocess the masked image\n masked_image = preprocess(Image.fromarray(masked_image))\n\n # Convert the image to tensor and move to GPU\n # image_input = torch.tensor(masked_image).unsqueeze(0).cuda()\n image_input = masked_image.unsqueeze(0).cuda().float()\n\n with torch.no_grad():\n # Encode image features\n image_features = model.encode_image(image_input).float()\n\n return image_features.detach().cpu().numpy()\n\n\ndef generate_mask_id(mask_features, existing_masks, threshold=6.0, distance_metric=\"euclidean\"):\n \"\"\"\n Generates an ID for the mask based on its features and compares it with existing masks.\n\n Args:\n mask_features (ndarray): The features of the mask.\n existing_masks (list): List of existing masks and their features.\n threshold (float): Similarity threshold for considering a match (default: 0.9).\n distance_metric (str): Distance metric to be used (default: \"euclidean\").\n Options: \"euclidean\", \"cosine\".\n\n Returns:\n mask_id (int): The generated ID for the mask.\n \"\"\"\n from scipy.spatial.distance import euclidean, cosine\n\n mask_id = -1 # Initialize the mask ID\n\n if distance_metric == \"euclidean\":\n distance_function = euclidean\n elif distance_metric == \"cosine\":\n distance_function = cosine\n else:\n raise ValueError(\n \"Invalid distance metric. 
Choose either 'euclidean' or 'cosine'.\")\n\n for idx, (existing_id, existing_features) in enumerate(existing_masks):\n similarity = distance_function(\n mask_features.flatten(), existing_features.flatten())\n\n if similarity < threshold:\n mask_id = existing_id\n break\n\n if mask_id == -1:\n # Assign a new ID if no match is found\n mask_id = len(existing_masks) + 1\n existing_masks.append((mask_id, mask_features.flatten()))\n\n return mask_id\n\n\ndef convert_to_annolid_format(frame_number,\n masks,\n frame=None,\n model=None,\n min_mask_area=float('-inf'),\n max_mask_area=float('inf'),\n existing_masks=None\n ):\n \"\"\"Converts predicted SAM masks information to annolid format.\n\n Args:\n frame_number (int): The frame number associated with the masks.\n masks (list): List of dictionaries representing the predicted masks.\n Each dictionary should contain the following keys:\n -segmentation : the mask\n -area : the area of the mask in pixels\n -bbox : the boundary box of the mask in XYWH format\n -predicted_iou : the model's own prediction for the quality of the mask\n -point_coords : the sampled input point that generated this mask\n -stability_score : an additional measure of mask quality\n -crop_box : the crop of the image used to generate this mask in XYWH format\n\n Returns:\n list: List of dictionaries representing the masks in annolid format.\n Each dictionary contains the following keys:\n - \"frame_number\": The frame number associated with the masks.\n - \"x1\", \"y1\", \"x2\", \"y2\": The coordinates of the bounding box in XYXY format.\n - \"instance_name\": The name of the instance/object.\n - \"class_score\": The predicted IoU (Intersection over Union) for the mask.\n - \"segmentation\": The segmentation mask.\n - \"tracking_id\": The tracking ID associated with the mask.\n\n \"\"\"\n pred_rows = []\n for mask in masks:\n mask_area = mask.get(\"area\", 0)\n if min_mask_area <= mask_area <= max_mask_area:\n x1 = mask.get(\"bbox\")[0]\n y1 = mask.get(\"bbox\")[1]\n x2 = mask.get(\"bbox\")[0] + mask.get(\"bbox\")[2]\n y2 = mask.get(\"bbox\")[1] + mask.get(\"bbox\")[3]\n score = mask.get(\"predicted_iou\", '')\n segmentation = mask.get(\"segmentation\", '')\n mask_features = get_mask_features(frame, segmentation, model)\n mask_id = generate_mask_id(mask_features, existing_masks)\n instance_name = mask.get(\"instance_name\", f'instance_{mask_id}')\n segmentation = mask_util.encode(segmentation)\n tracking_id = mask.get(\"tracking_id\", \"\")\n\n pred_rows.append({\n \"frame_number\": frame_number,\n \"x1\": x1,\n \"y1\": y1,\n \"x2\": x2,\n \"y2\": y2,\n \"instance_name\": instance_name,\n \"class_score\": score,\n \"segmentation\": segmentation,\n \"tracking_id\": tracking_id\n })\n\n return pred_rows\n\n\ndef crop_image_with_masks(image,\n masks,\n max_area=8000,\n min_area=500,\n width_height_ratio=0.9):\n \"\"\"\n Crop the image based on provided masks and apply the masks to each cropped region.\n\n Args:\n image (numpy.ndarray): The input image.\n masks (list): A list of dictionaries containing mask data.\n max_area (int): Max area of the mask\n min_area (int): Min area of the mask\n width_height_ratio(float): Min width / height\n\n Returns:\n list: A list of cropped images with applied masks.\n \"\"\"\n cropped_images = []\n\n for mask_data in masks:\n # Extract mask and bounding box data\n bbox = mask_data['bbox']\n seg = mask_data['segmentation']\n x, y, w, h = bbox\n\n # Crop the image based on the bounding box\n cropped_image = image[y:y+h, x:x+w]\n\n # Create an 8-bit 
mask from the segmentation data\n mask = np.asarray(seg[y:y+h, x:x+w], dtype=np.uint8) * 255\n # Apply the mask to the cropped image\n cropped_image = cv2.bitwise_and(\n cropped_image, cropped_image, mask=mask)\n cropped_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)\n if (mask_data['area'] >= min_area and\n mask_data['area'] <= max_area and\n w/h >= width_height_ratio):\n cropped_images.append(cropped_image)\n\n return cropped_images\n\n\ndef process_video_and_save_tracking_results(video_file, mask_generator, model=None):\n \"\"\"\n Process a video file, generate tracking results with segmentation masks,\n and save the results to a CSV file.\n\n Args:\n video_file (str): Path to the video file.\n mask_generator: An instance of the mask generator class.\n\n Returns:\n None\n \"\"\"\n import decord as de\n import pandas as pd\n video_reader = de.VideoReader(video_file)\n tracking_results = []\n existing_masks = []\n\n for key_index in video_reader.get_key_indices():\n frame = video_reader[key_index].asnumpy()\n masks = mask_generator.generate(frame)\n tracking_results += convert_to_annolid_format(\n key_index, masks, frame, model, existing_masks=existing_masks)\n print(key_index)\n\n dataframe = pd.DataFrame(tracking_results)\n output_file = f\"{video_file.split('.')[0]}_mask_tracking_results_with_segmentation.csv\"\n dataframe.to_csv(output_file)\n","repo_name":"healthonrails/annolid","sub_path":"annolid/segmentation/SAM/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7958,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"70045675574","text":"__author__ = \"David Brown \"\n__contributors__ = []\n__copyright__ = \"Copyright (c) 2012 Cisco and/or its affiliates.\"\n__license__ = \"Cisco Sample Code License, Version 1.1\"\n\nfrom flask import Flask, make_response, render_template, redirect, url_for, session\nfrom markupsafe import Markup\nfrom includes import *\nfrom json2html import *\nimport json\nfrom urllib import parse\n\napp = Flask(__name__)\napp.secret_key = 'any random string'\n\n\n###########################################################################\n# Prompt user to set vManage settings\n###########################################################################\n\n@app.route('/')\ndef getsettings():\n vmanage = request.cookies.get('vmanage')\n userid = request.cookies.get('userid')\n password = request.cookies.get('password')\n if vmanage is None:\n vmanage = userid = password = 'not set'\n return render_template('getsettings.html', vmanage=vmanage, userid=userid, password=password,\n secret='*****' + password[-2:])\n\n\n###########################################################################\n# Read and save settings\n###########################################################################\n\n@app.route('/savesettings')\ndef savesettings():\n resp = make_response(redirect(url_for('menu')))\n\n # Save vManage settings in a cookie\n for arg in request.args:\n resp.set_cookie(arg, request.args.get(arg), secure=True, httponly=True)\n\n return resp\n\n\n###########################################################################\n# Main menu. 
This screen also clears any leftover session variables\n###########################################################################\n\n@app.route('/menu')\ndef menu():\n # Clear user session variables from previous tasks\n session.clear()\n try:\n vmanage = login()\n # Problems logging into vManage should be caught here...\n except Exception as err:\n return render_template('error.html', err=err)\n devices = vmanage.get_request('system/device/vedges')\n vmanage.logout()\n models = '\\n'\n for device in devices['data']:\n if device['deviceModel'] not in models:\n models += f'\\n'\n return render_template('menu.html', vmanage=request.cookies.get('vmanage'), models=Markup(models))\n\n\n###########################################################################\n# List edges. Takes parameters model and mode.\n###########################################################################\n\n@app.route('/listedges')\ndef listedges():\n model = request.args.get('model') or 'all'\n mode = request.args.get('mode') or 'all'\n\n vmanage = login()\n data = list_edges(vmanage, mode=mode, model=model)\n vmanage.logout()\n data.insert(0, ['UUID', 'Hostname', 'Model', 'Mode'])\n output = buildtable(data)\n\n return render_template('table.html', title='List Edges', instructions='List of all edge devices',\n data=Markup(output))\n\n\n###########################################################################\n# List templates. Takes parameter model.\n###########################################################################\n\n@app.route('/listtemplates')\ndef listtemplates():\n model = request.args.get('model') or 'all'\n vmanage = login()\n data = list_templates(vmanage, model)\n vmanage.logout()\n data.insert(0, ['UUID', 'Name', 'Description', 'Device Type'])\n output = buildtable(data)\n\n return render_template('table.html', title='List Templates', instructions='List of all templates',\n data=Markup(output))\n\n\n###########################################################################\n# RMA Edge. Collects device to replace, new device, and template details\n###########################################################################\n@app.route('/rmaedge')\ndef rmaedge():\n model = request.args.get('model') or session['model']\n session['model'] = model\n # List edges in vManage mode for user to select from\n # If oldedge is already set move to the next step.\n try:\n oldedge = request.args.get('oldedge') or session['oldedge']\n #oldedge = parse.quote_plus(oldedge)\n session['oldedge'] = oldedge\n except KeyError:\n vmanage = login()\n data = list_edges(vmanage, mode='vmanage', model=model)\n data.insert(0, ['UUID', 'Hostname', 'Model', 'Mode'])\n output = buildtable(data, link='/rmaedge?oldedge=')\n vmanage.logout()\n return render_template('table.html', data=Markup(output), title='Pick Old Edge',\n instructions=Markup('Select the Edge device to replace:
<br><br>'))\n\n    # List edges in CLI mode for user to choose from.\n    # If replacement edge is already set, move to the next step.\n    try:\n        newedge = request.args.get('newedge') or session['newedge']\n        session['newedge'] = newedge\n    except KeyError:\n        vmanage = login()\n        oldedge = parse.quote_plus(oldedge)\n        model = vmanage.get_request(f'device/models/{oldedge}')['name']\n        session['model'] = model\n        data = list_edges(vmanage, mode='cli', model=model)\n        data.insert(0, ['UUID', 'Hostname', 'Model', 'Mode'])\n        output = buildtable(data, link='/rmaedge?newedge=')\n        vmanage.logout()\n        return render_template('table.html', data=Markup(output), title='Pick New Edge',\n                               instructions=Markup('Select the replacement Edge:<br><br>'))\n\n    #\n    # Gather data and pass to the RMA confirmation page\n    #\n\n    vmanage = login()\n    print(oldedge)\n    template = get_device_template_variables(vmanage, oldedge)\n    session['template'] = template\n    vmanage.logout()\n    jtemplate = Markup(json2html.convert(template))\n    return render_template('rmaconfirm.html', template=jtemplate, oldedge=oldedge, newedge=newedge)
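# The /rmaedge route above carries its wizard state (old edge, new edge, saved
# template) across requests in Flask's session. A minimal, self-contained sketch
# of that pattern follows; the route name, key names, and secret here are
# illustrative only and are not part of this application.
from flask import Flask, request, session

demo_app = Flask(__name__)
demo_app.secret_key = 'demo-only-secret'

@demo_app.route('/pick')
def pick():
    # Use the query parameter if supplied, otherwise fall back to whatever an
    # earlier step stored in the session.
    choice = request.args.get('choice') or session.get('choice')
    if choice is None:
        return 'Nothing picked yet; call /pick?choice=something first.'
    session['choice'] = choice  # remember the selection for the next step
    return f'Picked: {choice}'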
###########################################################################\n# RMA Edge confirmation screen prompts for confirmation and executes exchange\n###########################################################################\n@app.route('/rmaconfirm')\ndef rmaconfirm():\n    #\n    # Deletes oldedge, attaches template to newedge, returns job result\n    #\n\n    # Invalidate Device Certificate\n    vmanage = login()\n    cert_status = set_certificate(vmanage, session['oldedge'], session['model'], 'invalid')\n    output = 'Invalidate Certificate:<br>'\n    output += str(cert_status)\n\n    # Delete old device\n    delete_status = vmanage.delete_request(f'system/device/{session["oldedge"]}')\n    output += '<br>Delete Edge:<br>'\n    output += str(delete_status)\n\n    # Create template variables JSON object with new UUID\n    template = session['template']\n    template['device'][0]['csv-deviceId'] = session['newedge']\n    payload = {"deviceTemplateList": [\n        template\n    ]\n    }\n    output += '<br>Build template payload<br>'\n    output += (json.dumps(payload, indent=2))\n\n    # Attach template to new edge\n    attach_job = vmanage.post_request('template/device/config/attachment', payload=payload)\n    output += '<br>Attach Template:<br>'\n    output += str(attach_job)\n    output += action_status(vmanage, attach_job['id'])\n\n    vmanage.logout()\n    output += '<br><br><a href="/menu">Return to main menu</a>'\n    return Markup(output)
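# rmaconfirm() above swaps the chassis UUID inside the saved variable set and
# wraps it in a deviceTemplateList before posting it back. The hypothetical
# helper below reproduces just that payload-building step with made-up
# placeholder values (the UUIDs and the variable path are invented):
import json

def build_attach_payload(template_vars: dict, new_uuid: str) -> dict:
    # Point the per-device variable set at the replacement chassis...
    template_vars['device'][0]['csv-deviceId'] = new_uuid
    # ...and wrap it the way the attachment call above expects.
    return {"deviceTemplateList": [template_vars]}

demo_vars = {'templateId': 'tmpl-001',
             'device': [{'csv-deviceId': 'old-uuid', '/10/vpn0/interface/ip': '10.0.0.1/24'}]}
print(json.dumps(build_attach_payload(demo_vars, 'new-uuid'), indent=2))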
###########################################################################\n# Edit edge. Collects edge device, displays form with template values\n###########################################################################\n@app.route('/editedge')\ndef editedge():\n    model = request.args.get('model') or session['model']\n    session['model'] = model\n    # Build a table of edges for user to select from.\n    # If edge has already been set, move to next step.\n    try:\n        edge = request.args.get('edge') or session['edge']\n        session['edge'] = edge\n    except KeyError:\n        vmanage = login()\n        data = list_edges(vmanage, mode='vmanage', model=model)\n        data.insert(0, ['UUID', 'Hostname', 'Model', 'Mode'])\n        output = buildtable(data, link='/editedge?edge=')\n        vmanage.logout()\n        return render_template('table.html', data=Markup(output), title='Edit Edge Values',\n                               instructions=Markup('Select the Edge device to edit:<br><br>'))\n\n    # Build a form of template variables for user to edit.\n    # Uses templateId parameter or finds attached templateId\n    # Post form to update template\n    vmanage = login()\n    try:\n        templateId = request.args.get('templateId') or session['templateId']\n        try:\n            template = get_device_template_variables(vmanage, edge, templateId)\n        # If user does not have Write privileges, error will be caught here\n        except Exception as err:\n            return render_template('error.html', err=err)\n    except KeyError:\n        template = get_device_template_variables(vmanage, edge)\n    vmanage.logout()\n    session['template'] = template\n    data = template['device'][0]\n    tabdata = [['Field', 'Value']]\n    formdata = {}\n    for item in data:\n        if item[0] == '/':\n            formdata[item] = data[item]\n        else:\n            tabdata.append([item, data[item]])\n    output = buildtable(tabdata)\n    output += buildform(formdata, action='/updatetemp')\n    return render_template('table.html', data=Markup(output), title='Edit Edge Values',\n                           instructions=Markup(\n                               'Edit any values below and submit to update the device configuration:<br><br>'))
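# editedge() above decides which template fields are user-editable by checking
# whether the key starts with '/', i.e. whether it is a per-device variable
# path rather than a fixed field. The same split in isolation, as a
# hypothetical standalone helper (the sample keys are invented):
def split_template_fields(device_vars: dict):
    editable, fixed = {}, {}
    for key, value in device_vars.items():
        (editable if key.startswith('/') else fixed)[key] = value
    return editable, fixed

# split_template_fields({'csv-deviceId': 'abc', '/10/vpn0/ip': '10.0.0.1'})
# -> ({'/10/vpn0/ip': '10.0.0.1'}, {'csv-deviceId': 'abc'})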
###########################################################################\n# Attach template and monitor job result\n###########################################################################\n@app.route('/updatetemp', methods=['POST'])\ndef updatetemp():\n    # Retrieve variables and modify template\n    template = session['template']\n    output = '<a href="/menu">Return to Main Menu.</a><br>'\n    output += 'Old Template:<br>' + json2html.convert(template)\n\n    # Create template variables JSON object with new UUID\n    variables = request.form\n    for value in variables:\n        template['device'][0][value] = variables[value]\n    payload = {"deviceTemplateList": [\n        template\n    ]\n    }\n    output += "<br>New Template:<br>" + json2html.convert(payload)\n\n    # Attach template to new edge\n    vmanage = login()\n    attach_job = vmanage.post_request('template/device/config/attachment', payload=payload)\n    output += '<br>Attach Template:<br>'\n    output += str(attach_job)\n    output += action_status(vmanage, attach_job['id'])\n    vmanage.logout()\n\n    output += '<br><br><a href="/menu">Return to main menu</a>'\n\n    return Markup(output)
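# updatetemp() above folds the posted form straight into the first device's
# variable dict, one key at a time. Reduced to its core and detached from
# Flask's request object, the merge is just the hypothetical helper below
# (the helper name is ours, not the app's):
def apply_form_values(template_vars: dict, form: dict) -> dict:
    for field, value in form.items():
        template_vars['device'][0][field] = value
    return template_vars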
###########################################################################\n# Deploy new edge. Prompt for edge, prompt for template, hand off to edit edge\n###########################################################################\n@app.route('/deployedge')\ndef deployedge():\n    # List edges in CLI mode for user to choose from.\n    # If replacement edge is already set, move to the next step.\n    model = request.args.get('model') or session['model']\n    session['model'] = model\n    try:\n        edge = request.args.get('edge') or session['edge']\n        session['edge'] = edge\n    except KeyError:\n        vmanage = login()\n        data = list_edges(vmanage, mode='cli', model=model)\n        for edge in data:\n            edgelink = f'<a href="/deployedge?edge={edge[0]}">{edge[0]}</a>'\n            edge[0] = edgelink\n        data.insert(0, ['UUID', 'Hostname', 'Model', 'Mode'])\n        output = buildtable(data)\n        vmanage.logout()\n        return render_template('table.html', data=Markup(output), title='Pick New Edge',\n                               instructions=Markup('Select the replacement Edge:<br><br>'))\n\n    # Build a list of templates that apply to the edge deviceType for the user to choose from\n    # Send the templateId and deviceId to the Edit Edge routine\n    vmanage = login()\n    data = list_templates(vmanage, model=session['model'])\n    vmanage.logout()\n    data.insert(0, ['uuid', 'Name', 'Description', 'device type'])\n    output = buildtable(data, link='/editedge?templateId=')\n    return render_template('table.html', data=Markup(output), title='Pick a template',\n                           instructions=Markup('Select the template to apply:<br><br>
'))\n\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python38_app]\n","repo_name":"CiscoSE/vManageOpsGUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8022384795","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 1 22:02:44 2019\n\n@author: mauri\n\"\"\"\nimport sys\n#sys.path.insert(1, 'C:\\\\Users\\\\mauri\\\\proyectos\\\\GPUSCPRepair\\\\cudaTest\\\\reparacionGpu')\n\nimport numpy as np\nfrom Problema.scp import read_instance as r_instance\nfrom Problema.scp import binarizationstrategy as _binarization\nfrom Problema.scp.repair import ReparaStrategy as _repara\nfrom datetime import datetime\n#import multiprocessing as mp\n#from numpy.random import default_rng\nfrom Problema.scp.repair import cumpleRestricciones as reparaGPU\nfrom Problema.scp.permutationRank import PermRank\nfrom Problema.Problema import Problema\n\n\nclass SCP(Problema):\n def __init__(self, instancePath = None):\n# print(f'LEYENDO INSTANCIA')\n self.mejorEvaluacion = None\n self.mejoresSoluciones = None\n self.mejorEvaluacion = None\n self.parametros = {}\n self.instancia = instancePath\n self.instance = r_instance.Read(instancePath)\n self.optimo = self.instance.optimo\n# print(f'FIN LEYENDO INSTANCIA')\n if(self.instance.columns != np.array(self.instance.get_c()).shape[0]):\n raise Exception(f'self.instance.columns {self.instance.columns} != np.array(self.instance.get_c()).shape[1] {np.array(self.instance.get_c()).shape[1]})')\n self.tTransferencia = \"sShape2\"\n self.tBinary = \"Standar\" \n self.binarizationStrategy = _binarization.BinarizationStrategy(self.tTransferencia, self.tBinary) \n self.repair = _repara.ReparaStrategy(self.instance.get_r()\n ,self.instance.get_c()\n ,self.instance.get_rows()\n ,self.instance.get_columns())\n self.paralelo = False\n self.penalizar = False\n self.mejorSolHist = np.ones((self.instance.get_columns())) * 0.5\n self.mejorFitness = None\n\n self.partSize = 8\n self.rangeMax = []\n self.permRank = PermRank()\n self.particiones = []\n for _ in range(int(self.instance.get_columns()/self.partSize)):\n self.rangeMax.append(self.permRank.totalPerm(self.partSize))\n self.particiones.append(self.partSize)\n\n if self.instance.get_columns()%self.partSize > 0:\n self.rangeMax.append(self.permRank.totalPerm(self.instance.get_columns()%self.partSize))\n self.particiones.append(self.instance.get_columns()%self.partSize)\n self.rangeMax = np.array(self.rangeMax)\n self.particiones = np.array(self.particiones)\n\n\n def getNombre(self):\n return 'SCP'\n \n def getNumDim(self):\n return self.instance.columns\n #return self.particiones.shape[0]\n\n def getRangoSolucion(self):\n return {'max': self.rangeMax, 'min':np.zeros(self.rangeMax.shape[0])}\n\n def getDominioDim(self):\n return [-10,1]\n\n def evalObj(self, soluciones):\n decoded, _ = self.decodeInstancesBatch(soluciones)\n return self.evalInstanceBatch(decoded)\n\n def getIndiceMejora(self):\n return self.indiceMejora\n\n def getMejorEvaluacion(self):\n return self.mejorEvaluacion\n\n def setParametros(self, parametros):\n for parametro in parametros:\n self.parametros[parametro] = parametros[parametro]\n \n 
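    # Note on the encoding above: __init__ splits the SCP columns into blocks of
    # partSize (8) and represents each block by a permutation rank computed with
    # PermRank, so a candidate solution is a vector of integers bounded by
    # rangeMax. encodeInstance() maps a decoded block to its rank via
    # permRank.getRank, and decodeInstance() inverts it via permRank.unrank.
    # Illustrative only, kept as comments since the exact PermRank semantics
    # live in Problema.scp.permutationRank:
    #   rank = self.permRank.getRank(decoded[0:8])    # block -> integer
    #   block = self.permRank.unrank(8, rank)         # integer -> block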
def getParametros(self):\n return self.parametros\n\n def evaluarFitness(self, soluciones):\n evaluaciones = self.evalObj(soluciones)\n mejorEvaluacion = np.min(evaluaciones)\n if self.mejorEvaluacion is None: self.mejorEvaluacion = mejorEvaluacion\n idxMejorEval = evaluaciones == mejorEvaluacion\n mejoresSoluciones = np.unique(soluciones[idxMejorEval], axis=0)\n self.indiceMejora = self.getIndsMejora(self.mejorEvaluacion,mejorEvaluacion)\n if mejorEvaluacion < self.mejorEvaluacion:\n self.mejoresSoluciones = mejoresSoluciones\n self.mejorEvaluacion = mejorEvaluacion\n if mejorEvaluacion == self.mejorEvaluacion:\n mejoresSolucionesL = list(mejoresSoluciones)\n if self.mejoresSoluciones is not None:\n mejoresSolucionesL.extend(list(self.mejoresSoluciones))\n self.mejoresSoluciones = np.unique(np.array(mejoresSolucionesL), axis=0)\n \n return evaluaciones\n\n def getIndsMejora(self, f1, f2):\n #cuanto mejora f2 a f1 \n assert f1.shape == f2.shape, f\"Fitness 1 {f1.shape} diferente a fitness 2 {f2.shape}\"\n return (f1-f2)/f1\n\n def getMejorIdx(self, fitness):\n return np.argmin(fitness)\n\n def getPeorIdx(self, fitness):\n return np.argmax(fitness)\n\n def eval(self, encodedInstance):\n decoded, numReparaciones = self.frepara(encodedInstance)\n fitness = self.evalInstance(encodedInstance)\n return fitness, decoded, numReparaciones\n\n\n def evalEnc(self, encodedInstance):\n decoded, numReparaciones = self.decodeInstance(encodedInstance)\n fitness = self.evalInstance(decoded)\n if self.mejorFitness is None or fitness > self.mejorFitness:\n self.mejorFitness = fitness\n self.binarizationStrategy.mejorSol = decoded\n encoded = self.encodeInstance(decoded)\n return fitness, decoded, numReparaciones,encoded\n\n def evalEncBatch(self, encodedInstances):\n inicio = datetime.now()\n decoded, numReparaciones = self.decodeInstancesBatch(encodedInstances)\n fin = datetime.now()\n #print(f\"decoding demoro {fin-inicio}\")\n #print(f\"evalEncBatch {decoded.shape}\")\n #exit()\n fitness = self.evalInstanceBatch(decoded)\n \n #encoded = self.encodeInstanceBatch(decoded)\n encoded = decoded.astype(float)\n return fitness, decoded, numReparaciones, encoded\n \n def evalDecBatch(self, encodedInstances, mejorSol):\n fitness = self.evalInstanceBatch(encodedInstances)\n \n \n return fitness, encodedInstances, None\n \n def encodeInstanceBatch(self, decodedInstances):\n ret = np.array([self.encodeInstance(decodedInstances[i]) for i in range(decodedInstances.shape[0])],dtype=float)\n return ret\n\n def encodeInstance(self, decodedInstance):\n currIdx = 0\n res = []\n for partSize in self.particiones:\n res.append(self.permRank.getRank(decodedInstance[currIdx:currIdx+partSize]))\n currIdx+=partSize\n return np.array(res)\n\n# @profile\n \n def decodeInstancesBatch(self, encodedInstances):\n start = datetime.now()\n b = np.array([self.binarizationStrategy.binarize(inst) for inst in encodedInstances])\n #print(encodedInstances)\n encodedInstances = np.array(b)\n #encodedInstances = np.array(encodedInstances)\n #print(encodedInstances.shape)\n #exit()\n #print(f\"encoded instances: {encodedInstances.shape}\")\n #b = np.array([self.decodeInstance(encodedInstances[i,:])[0] for i in range(encodedInstances.shape[0])])\n #print(f\"discretizado: {b.shape}\")\n #exit()\n end = datetime.now()\n\n binTime = end-start\n numReparaciones = 0\n \n repaired = self.freparaBatch(b)\n return repaired, numReparaciones\n \n \n def decodeInstance(self, encodedInstance):\n encodedInstance = np.array(encodedInstance).astype(np.int8)\n if 
encodedInstance.shape[0] != self.particiones.shape[0]:\n raise Exception(\"La instancia encodeada cambio su tamaño\")\n\n binario = []\n #print(encodedInstance)\n #raise Exception\n for idx in range(encodedInstance.shape[0]):\n #print(f\"self.particiones[idx], encodedInstance[idx] {self.particiones[idx]}, {encodedInstance[idx]}\")\n binario.extend(self.permRank.unrank(self.particiones[idx], encodedInstance[idx]).tolist())\n b = np.array(binario)\n \n\n #b = self.binarizationStrategy.binarize(encodedInstance)\n numReparaciones = 0\n #if not self.penalizar:\n # b, numReparaciones = self.frepara(b)\n return b, numReparaciones\n \n def binarize(self, x):\n return _binarization.BinarizationStrategy(x,self.tTransferencia, self.tBinary)\n \n# @profile\n def evalInstance(self, decoded):\n return -(self.fObj(decoded, self.instance.get_c())) if self.repair.cumple(decoded) == 1 else -1000000\n \n def evalInstanceBatch(self, decoded):\n start = datetime.now()\n ret = np.sum(np.array(self.instance.get_c())*decoded, axis=1)\n end = datetime.now()\n return ret\n \n# @profile\n def fObj(self, pos,costo):\n return np.sum(np.array(pos) * np.array(costo))\n \n# @profile\n def freparaBatch(self,x):\n start = datetime.now()\n #print(f\"freparaBatch {x.shape}\")\n #exit()\n reparadas = reparaGPU.reparaSoluciones(x, self.instance.get_r(), self.instance.get_c(), self.instance.pondRestricciones)\n #print(reparadas.shape)\n #exit()\n end = datetime.now()\n #print(f\"reparacion demoro {end-start}\")\n return reparadas\n \n \n def frepara(self,x):\n start = datetime.now()\n cumpleTodas=0\n cumpleTodas=self.repair.cumple(x)\n if cumpleTodas == 1: return x, 0\n \n x, numReparaciones = self.repair.repara_one(x) \n x = self.mejoraSolucion(x)\n end = datetime.now()\n return x, numReparaciones\n \n def mejoraSolucion(self, solucion):\n solucion = np.array(solucion)\n costos = solucion * self.instance.get_c()\n cosOrd = np.argsort(costos)[::-1]\n for pos in cosOrd:\n if costos[pos] == 0: break\n modificado = solucion.copy()\n modificado[pos] = 0\n if self.repair.cumple(modificado) == 1:\n solucion = modificado\n return solucion\n \n def generarSoluciones(self, numSols):\n# args = []\n #mejorSol = None\n if self.mejoresSoluciones is None:\n args = np.zeros((numSols, self.getNumDim()), dtype=np.float)\n #args = np.ones((numSols, self.getNumDim()), dtype=np.float)\n #args = np.random.randint(low=self.getRangoSolucion()['min'], high=self.getRangoSolucion()['max'], size=(numSols,self.getRangoSolucion()['max'].shape[0]))\n #print(args)\n #exit()\n else:\n #self.mejorSolHist = (mejorSol+self.mejorSolHist)/2\n args = []\n for i in range(numSols):\n idx = np.random.randint(low=0, high=self.mejoresSoluciones.shape[0])\n sol = self.mejoresSoluciones[idx].copy()\n #idx = np.random.randint(low=0, high=sol.shape[0])\n #print(np.argwhere(sol > 0).reshape(-1))\n #exit()\n idx = np.random.choice(np.argwhere(sol > np.mean(sol)*1.5).reshape(-1), 1)[0]\n #print(idx)\n #exit()\n \n sol[idx] += np.random.randint(low=-10, high=-1)\n #if sol[idx] > self.particiones[idx]: sol[idx] = self.particiones[idx]\n #if sol[idx] < 0: sol[idx] = 0\n args.append(sol)\n args = np.array(args)\n fitness = []\n ant = self.penalizar\n self.penalizar = False\n fitness, _, _, sol = self.evalEncBatch(args)\n return sol\n \n def graficarSol(self, datosNivel, parametros, nivel, id = 0):\n if not hasattr(self, 'graficador'):\n self.initGrafico()\n y = datosNivel['soluciones'][0]\n vels = datosNivel['velocidades'][0]\n self.graficador.live_plotter(np.arange(y.shape[0]),y, 
'soluciones', dotSize=0.1, marker='.')\n self.graficador.live_plotter(np.arange(vels.shape[0]), vels, 'velocidades', dotSize=0.1, marker='.')\n self.graficador.live_plotter(np.arange(parametros.shape[0]), parametros, 'paramVel', dotSize=1.5, marker='-')\n ","repo_name":"mauriceaux/solverMH","sub_path":"Problema/SCP.py","file_name":"SCP.py","file_ext":"py","file_size_in_byte":11991,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39990559900","text":"import numpy as np\nimport argparse\nimport cv2\nimport time\nfrom scipy import signal\nfrom util import writePFM\n\nDEBUG = False\n\nparser = argparse.ArgumentParser(description=\"Disparity Estimation\")\nparser.add_argument(\n \"--input-left\",\n default=\"./data/Synthetic/TL0.png\",\n type=str,\n help=\"input left image\",\n)\nparser.add_argument(\n \"--input-right\",\n default=\"./data/Synthetic/TR0.png\",\n type=str,\n help=\"input right image\",\n)\nparser.add_argument(\n \"--output\", default=\"./TL0.pfm\", type=str, help=\"left disparity map\"\n)\n\n# You can modify the function interface as you like\ndef computeDisp(Il, Ir, max_disp=64):\n h, w, ch = Il.shape\n labels = np.zeros((h, w), dtype=np.float32)\n Il = Il.astype(np.float32)\n Ir = Ir.astype(np.float32)\n\n # padding with size 32\n block_size = 5\n half_size = int((block_size-1)/2)#2\n padding_size = half_size*16\n Il_padding = cv2.copyMakeBorder( Il,padding_size,padding_size,padding_size,padding_size,cv2.BORDER_REFLECT)\n Ir_padding = cv2.copyMakeBorder( Ir,padding_size,padding_size,padding_size,padding_size,cv2.BORDER_REFLECT)#BORDER_REPLICATE\n\n # BGR2GRAY\n imgL = cv2.cvtColor(Il_padding, cv2.COLOR_BGR2GRAY)\n imgR = cv2.cvtColor(Ir_padding, cv2.COLOR_BGR2GRAY)\n\n imgh,imgw = imgL.shape[:2]\n disL = np.zeros((imgh, imgw), dtype=np.float32)\n num_disp = block_size + max_disp # search range\n distmp = np.zeros((imgh, imgw)) # check if the pixel is filled\n\n for i in range(half_size,imgh-half_size):\n for j in range(half_size,imgw-half_size):\n tpl=imgL[i-half_size:i+half_size+1,j-half_size:j+half_size+1]\n left_bound = j-num_disp\n right_bound = j+half_size\n if left_bound < 0:\n left_bound = 0\n if right_bound >= imgw:\n right_bound = imgw-1\n target=imgR[i-half_size:i+half_size+1,left_bound:right_bound+1]\n\n result=cv2.matchTemplate(target,tpl, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n max_loc = (max_loc[0]+left_bound,max_loc[1])\n\n\n if distmp[i,max_loc[0]+half_size] == 0 :\n disL[i,j] = j-(max_loc[0]+half_size)\n distmp[i,max_loc[0]+half_size] =1\n\n disL = disL[padding_size:-padding_size,padding_size:-padding_size]\n disL = disL.astype(np.uint8)#.astype(np.int)\n\n # Disparity refinement\n # hole filling\n FL = disL.copy()\n for i in range(disL.shape[0]):\n maybe_valid = 0\n for j in range(disL.shape[1]):#from left to right\n if FL[i,j] != 0:\n maybe_valid = FL[i,j]\n else:\n FL[i,j] = maybe_valid\n # median filter\n labels = signal.medfilt2d(FL,15)\n\n\n return labels.astype(np.float32)#.astype(np.uint8)\n\n\ndef main():\n DEBUG = True\n args = parser.parse_args()\n\n print(args.output)\n print(\"Compute disparity for %s\" % args.input_left)\n img_left = cv2.imread(args.input_left)\n img_right = cv2.imread(args.input_right)\n tic = time.time()\n disp = computeDisp(img_left, img_right)\n toc = time.time()\n writePFM(args.output, disp)\n print(\"Elapsed time: %f sec.\" % (toc - tic))\n\n\nif __name__ == \"__main__\":\n 
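    # computeDisp() above is a straightforward block-matching stereo pipeline:
    # each 5x5 left-image patch is matched against a horizontal search strip in
    # the right image with cv2.matchTemplate(cv2.TM_CCOEFF_NORMED), zero-valued
    # holes are then filled by propagating the last valid disparity along each
    # row, and a 15x15 median filter (scipy.signal.medfilt2d) smooths the map.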
main()\n","repo_name":"jaylotw/cv_final","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26650204390","text":"import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom src.mailing.credentials import Credentials\nclass EmailBot:\n def __init__(self, destination_address, subject, message:str) -> None:\n self.sender_address = Credentials.MY_ADDRESS\n self.destination_address = destination_address\n self.subject = subject\n self.text = message\n\n def send(self):\n # connect to server\n server = smtplib.SMTP(host=Credentials.HOST_ADDRESS, port=Credentials.HOST_PORT)\n server.starttls()\n server.login(Credentials.MY_ADDRESS, Credentials.MY_PASSWORD)\n\n #creation of MIMEMultipart Object\n message = MIMEMultipart(\"alternative\")\n message[\"From\"] = Credentials.MY_ADDRESS\n message[\"To\"] = self.destination_address\n message[\"Subject\"] = self.subject\n\n # creation of MIMEText Part\n textPart = MIMEText(self.text, \"html\")\n\n # part attachment\n message.attach(textPart)\n\n # send email and close connection\n server.send_message(message)\n server.quit()","repo_name":"TheUltimateOptimist/tools","sub_path":"command_line_application/src/mailing/email_bot.py","file_name":"email_bot.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26762046431","text":"CSRF_ENABLED = True\nWTF_CSRF_ENABLED = True\nSECRET_KEY = \"ams\"\nMONGODB_DB = \"ams-sih\"\nMONGODB_HOST = \"mongodb\"\nUPLOADS_DEFAULT_DEST = '/var/uploads'\nUPLOAD_FOLDER = '/usr/share/'\nMAIL_SERVER = 'smtp.gmail.com'\nMAIL_PORT = 465\nMAIL_USE_SSL = True\n","repo_name":"Supasitrit/docker_flasks_angular2_mongodb_nginx","sub_path":"backend/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74121465331","text":"import os\nimport csv\nfrom collections import defaultdict\nimport numpy as np\nimport tensorflow as tf\nfrom rdkit.Chem import AllChem\n\nfrom smi2smi.data_reader import canonicalize, grammar_check\n\n# def grammar_check_for_beam_output(predicted_strings):\n# for strg_beam in predicted_strings:\n \n\ndef evaluate(predicted_strings, groundtruths, top_k, selfies):\n '''\n \n ''' \n n_examples = groundtruths.shape[0]\n canonicalized_predicted_strings = canonicalize(predicted_strings, selfies=selfies)\n n_exact_matches = np.sum(np.any(canonicalized_predicted_strings[:, :top_k]==groundtruths[:, np.newaxis],\n axis=-1))\n \n accuracy = n_exact_matches/n_examples\n \n n_candidates = np.sum(predicted_strings!='')\n n_valid_sequences = np.sum(canonicalized_predicted_strings!='')\n n_invalid_sequences = n_candidates - n_valid_sequences\n return {'n_examples':n_examples,\n 'n_candidates':n_candidates,\n 'n_exact_matches':n_exact_matches,\n 'accuracy':accuracy,\n 'n_valid_sequences':n_valid_sequences,\n 'n_invalid_sequences':n_invalid_sequences}\n\ndef rxn_sort(sequences, source_rxn_labels: list):\n available_labels = [f'' for i in range(10)]\n source_rxn_labels = np.array(source_rxn_labels)\n sorted_sequences = {}\n for available_label in available_labels:\n idx, = np.where(source_rxn_labels==available_label)\n sampled_sequences = sequences[idx]\n sorted_sequences[available_label] = sampled_sequences\n return 
sorted_sequences\n\ndef rxn_aware_evaluate(predicted_sequences:np.ndarray, \n groundtruths:np.ndarray, \n top_k, \n source_rxn_labels, \n selfies):\n outputs = []\n overall_eval = evaluate(predicted_sequences, groundtruths, top_k, selfies)\n overall_eval['reaction_class'] = 'overall'\n outputs.append(overall_eval)\n sorted_groundtruths = rxn_sort(groundtruths, source_rxn_labels)\n sorted_predicted_sequences = rxn_sort(predicted_sequences, source_rxn_labels)\n for rxn_class, rxn_sequence_set in sorted_predicted_sequences.items():\n rxn_eval = evaluate(rxn_sequence_set, sorted_groundtruths[rxn_class], top_k, selfies)\n rxn_eval['reaction_class'] = rxn_class\n outputs.append(rxn_eval)\n return outputs \n\ndef writeout_evaluation_results(eval_results, write_dir):\n fieldnames = ['reaction_class', 'n_examples', 'n_candidates', 'n_exact_matches', 'accuracy',\n 'n_valid_sequences', 'n_invalid_sequences']\n with open(write_dir, 'w') as file:\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(eval_results)\n print(f'Evaluate results are saved at {os.path.abspath(write_dir)}')\n \n\n# def rxn_sort(results, rxn_labels):\n# all_rxn_results = [results]\n# for i in range(10):\n# rxn_class_ids, = np.where(rxn_labels==f''.encode('ascii'))\n# rxn_results = {}\n# rxn_results['top_k'] = results['top_k']\n# rxn_results['n_samples'] = rxn_class_ids.size\n# rxn_results['correct_predictions'] = results['correct_predictions'][rxn_class_ids]\n# rxn_results['correct_prediction_ids'] = results['correct_prediction_ids'][rxn_class_ids]\n# rxn_results['potential_candidates'] = results['potential_candidates'][rxn_class_ids]\n# rxn_results['invalid_candidates'] = results['invalid_candidates'][rxn_class_ids]\n# rxn_results['total_em_score'] = sum(array.size>0 for array in rxn_results['correct_predictions'])\n# rxn_results['accuracy'] = rxn_results['total_em_score']/rxn_results['n_samples']\n# rxn_results['n_valid_smis'] = \\\n# sum(rxn_results['correct_predictions'][i].size+rxn_results['potential_candidates'][i].size\\\n# for i in range(len(rxn_class_ids)))\n# rxn_results['n_invalid_smis'] = sum(array.size for array in rxn_results['invalid_candidates'])\n# rxn_results['n_candidates'] = rxn_results['n_valid_smis']+rxn_results['n_invalid_smis']\n# rxn_results['index'] = rxn_class_ids\n# all_rxn_results.append(rxn_results)\n# return all_rxn_results\n\n# def print_rxn_class_results(results, ground_truths, source_smis, rxn_label, result_dir):\n# with open(os.path.join(result_dir, rxn_label+'.txt'), 'w') as file:\n# def _write_nested_array(nested_array, index_in_sub_list=None):\n# for i, prediction_array in enumerate(nested_array):\n# file.write('{:04d}.\\n'.format(results['index'][i]))\n# for j, prediction in enumerate(prediction_array):\n# if index_in_sub_list is not None:\n# rank = ' (rank_{:02d})'.format(index_in_sub_list[i][j]+1)\n# else:\n# rank = ''\n# file.write(prediction+'>>'+source_smis[results['index'][i]].decode('ascii')+rank+'\\n')\n# file.write('Ground truth:\\n'+ground_truths[results['index'][i]].decode('ascii')\\\n# +'>>'+source_smis[results['index'][i]].decode('ascii')+'\\n')\n \n# file.write('====={}=====\\n'.format(rxn_label.upper()))\n# file.write('\\nContents: METRICS, CORRECT PREDICTIONS, POTENTIAL PREDICTIONS, INVALID SMILES.\\n')\n# file.write('\\nMETRICS:\\n')\n# file.write('Number of samples: {}\\n'.format(results['n_samples']))\n# file.write('Number of candidates: {}\\n'.format(results['n_candidates']))\n# file.write('Number of exact matches: 
{}\\n'.format(results['total_em_score']))\n# file.write('Accuracy: {:.06f}\\n'.format(results['accuracy']))\n# file.write('Number of valid SMILES: {}\\n'.format(results['n_valid_smis']))\n# file.write('Number of invalid SMILES: {}\\n\\n'.format(results['n_invalid_smis']))\n \n# file.write('CORRECT PREDICTIONS:\\n')\n# _write_nested_array(results['correct_predictions'], results['correct_prediction_ids'])\n \n# file.write('\\nPOTENTIAL PREDICTIONS:\\n')\n# _write_nested_array(results['potential_candidates'])\n \n# file.write('\\nINVALID SMILES:\\n')\n# _write_nested_array(results['invalid_candidates'])\n \n# def print_results(all_results, ground_truths, source_smis, beam_results_dir):\n# results_dir = os.path.join(beam_results_dir, 'top_{}_results/'.format(all_results[0]['top_k']))\n# if not os.path.exists(results_dir):\n# os.mkdir(results_dir)\n# label = ['general']+[f'rx_{i+1}' for i in range(10)]\n# for i in range(11):\n# print_rxn_class_results(all_results[i], ground_truths, source_smis, label[i], results_dir)","repo_name":"XuanVuNguyen/smi2smi","sub_path":"smi2smi/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"14535307705","text":"\"\"\"provides functions to kill the script by raising SystemExit\"\"\"\n\nimport sys\nfrom typing import List, NoReturn\n\ndef assert_empty(blocked_actions: List[str]) -> None:\n \"\"\"used with validate_perms, which returns list of denied AWS actions\"\"\"\n if blocked_actions:\n err(\"IAM user missing following permission(s):\",\n *sorted(list(set(blocked_actions))))\n\n\ndef err(*halt_messages: str) -> NoReturn:\n \"\"\"prepend \"Error: \" to first halt message, then halt\"\"\"\n halt_msg_list = list(halt_messages)\n halt_msg_list[0] = f\"Error: {halt_messages[0]}\"\n stop(*halt_msg_list)\n\n\ndef stop(*halt_messages: str) -> NoReturn:\n \"\"\"halts the script by raising SystemExit\"\"\"\n if halt_messages:\n print(\"\")\n print(\"\\n\".join(halt_messages), file=sys.stderr, flush=True)\n sys.exit(1)\n","repo_name":"TakingItCasual/ec2mc","sub_path":"ec2mc/utils/halt.py","file_name":"halt.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"858809791","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport importlib\nfrom opentrons import protocol_api\n\n# Load library\nLIBRARY_PATH = '/root/ot2-covid19/library/'\nspec = importlib.util.spec_from_file_location(\"library.protocols.common_functions\",\n \"{}protocols/common_functions.py\".format(LIBRARY_PATH))\ncommon = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(common)\n\n# Load Brands & other stuff\nspec2 = importlib.util.spec_from_file_location(\"library.protocols.lab_stuff\",\n \"{}protocols/lab_stuff.py\".format(LIBRARY_PATH))\nlab_stuff = importlib.util.module_from_spec(spec2)\nspec2.loader.exec_module(lab_stuff)\n\n\nmetadata = {\n 'protocolName': 'C2',\n 'author': 'Luis Lorenzo Mosquera, Victor Soñora Pombo & Ismael Castiñeira Paz',\n 'source': 'Hospital Clínico Universitario de Santiago (CHUS)',\n 'apiLevel': '2.0',\n 'description': 'Creates RNAteca, in other words, dispense 40ul from deep-weel to 1.5ml Eppendorf tubes'\n}\n\n# ------------------------\n# Protocol parameters\n# ------------------------\nNUM_SAMPLES = 16\nbrand_name = 'vircell'\n\nx_offset = [0, 0]\nvolume_source = 19 # FIXME: no deja aspirar 20?!\nair_gap_vol_source = 
1\ndiameter_sample = 8.25\narea_section_sample = (math.pi * diameter_sample**2) / 4\n\n(brand_master_mix, arn) = lab_stuff.brands(brand_name)\n\nsample = {\n 'name': 'RNA samples',\n 'flow_rate_aspirate': 1,\n 'flow_rate_dispense': 1,\n 'rinse': False,\n 'delay': 0,\n 'reagent_reservoir_volume': 20 * 24,\n 'num_wells': 24,\n 'h_cono': 4,\n 'v_cono': 4 * area_section_sample * diameter_sample * 0.5 / 3,\n 'vol_well_original': 20,\n 'vol_well': 20,\n 'unused': [],\n 'col': 0,\n 'vol_well': 0\n}\n\n# following volumes in ul\nmaster_mix = {\n 'name': 'master mix',\n 'flow_rate_aspirate': 1,\n 'flow_rate_dispense': 1,\n 'rinse': False,\n 'delay': 0,\n 'reagent_reservoir_volume': 1500,\n 'num_wells': 1,\n 'h_cono': 4,\n 'v_cono': 4 * area_section_sample * diameter_sample * 0.5 / 3,\n 'vol_well_original': 1500,\n 'vol_well': 1500,\n 'unused': [],\n 'col': 0,\n 'vol_well': 0\n}\n\n\n# ----------------------------\n# Main\n# ----------------------------\ndef run(ctx: protocol_api.ProtocolContext):\n # ------------------------\n # Load LabWare\n # ------------------------\n # Tip racks\n tips = [ctx.load_labware('opentrons_96_filtertiprack_20ul', slot, '20µl filter tiprack') for slot in ['11']]\n\n # Pipette\n p20 = ctx.load_instrument('p20_single_gen2', 'right', tip_racks=tips)\n\n # Source (in this case NUM_SAMPLES well plate)\n source_plate = ctx.load_labware('abgene_96_wellplate_800ul', '5', 'ABGENE 96 Well Plate 800 µL')\n sources = source_plate.wells()[:NUM_SAMPLES]\n\n # Destination (in this case NUM_SAMPLES 1.5ml Eppendorf tubes)\n rack_num = math.ceil(NUM_SAMPLES / 24) if NUM_SAMPLES < 96 else 4\n destination_racks = [ctx.load_labware(\n 'opentrons_24_tuberack_generic_2ml_screwcap', slot,\n 'source tuberack with screwcap' + str(i + 1)) for i, slot in enumerate(['8', '4', '6', '2'][:rack_num])\n ]\n destination_racks_full = common.generate_source_table(destination_racks)\n destinations = destination_racks_full[:NUM_SAMPLES]\n\n # ------------------\n # Protocol\n # ------------------\n for s, d in zip(sources, destinations):\n if not p20.hw_pipette['has_tip']:\n common.pick_up(p20)\n\n # 2 * 20ul ~> 40ul of rna sample\n for _ in range(2):\n common.move_vol_multichannel(ctx, p20, reagent=master_mix, source=s, dest=d,\n vol=brand_master_mix, air_gap_vol=air_gap_vol_source,\n x_offset=x_offset, pickup_height=1, disp_height=-10,\n blow_out=True, touch_tip=True)\n # Drop pipette tip\n p20.drop_tip()\n","repo_name":"IPardelo/ot2-covid19","sub_path":"chus_protocols/protocolos_c/rnateca_protocol.py","file_name":"rnateca_protocol.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13219623416","text":"# -*- coding: utf-8 -*-\r\n# %reset -f\r\n\"\"\"\r\n@author: Hiromasa Kaneko\r\n\"\"\"\r\nimport math\r\n\r\nimport numpy as np\r\nimport numpy.matlib\r\nfrom scipy.spatial import distance\r\nfrom sklearn.base import is_classifier, clone\r\nfrom sklearn.model_selection._search import BaseSearchCV\r\nfrom sklearn.model_selection._split import _validate_shuffle_split\r\nfrom sklearn.model_selection import ParameterGrid, StratifiedKFold, KFold, cross_val_predict, GroupShuffleSplit, ShuffleSplit, StratifiedShuffleSplit\r\nfrom sklearn.metrics import r2_score, accuracy_score\r\nfrom sklearn.utils import indexable, _safe_indexing\r\nfrom sklearn.utils.validation import _num_samples\r\nfrom itertools import chain\r\n\r\nclass DCEGridSearchCV(BaseSearchCV):\r\n \"\"\"\r\n Hyperparameter optimization with grid 
search and cross-validation,\r\n    which is similar to GridSearchCV in scikit-learn https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\r\n\r\n    Parameters\r\n    ----------\r\n    Parameters are basically the same as the ones in GridSearchCV, KFold, and StratifiedKFold\r\n    GridSearchCV : https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\r\n    KFold : https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html\r\n    StratifiedKFold : https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold\r\n    """\r\n\r\n    def __init__(\r\n        self,\r\n        estimator,\r\n        param_grid,\r\n        *,\r\n        scoring=None,\r\n        n_jobs=None,\r\n        refit=True,\r\n        cv=None,\r\n        verbose=0,\r\n        pre_dispatch="2*n_jobs",\r\n        error_score=np.nan,\r\n        return_train_score=False,\r\n        random_state=None,\r\n        shuffle=True,\r\n        display_flag=False,\r\n    ):\r\n        super().__init__(\r\n            estimator=estimator,\r\n            scoring=scoring,\r\n            n_jobs=n_jobs,\r\n            refit=refit,\r\n            cv=cv,\r\n            verbose=verbose,\r\n            pre_dispatch=pre_dispatch,\r\n            error_score=error_score,\r\n            return_train_score=return_train_score,\r\n        )\r\n        self.param_grid = param_grid\r\n        self.random_state = random_state\r\n        self.shuffle = shuffle\r\n        self.display_flag = display_flag\r\n\r\n    def fit(self, x, y):\r\n        if isinstance(self.cv, int):\r\n            if is_classifier(self.estimator):\r\n                cross_validation = StratifiedKFold(n_splits=self.cv, random_state=self.random_state, shuffle=self.shuffle)\r\n            else:\r\n                cross_validation = KFold(n_splits=self.cv, random_state=self.random_state, shuffle=self.shuffle)\r\n        else:\r\n            cross_validation = self.cv\r\n\r\n        param_dicts = list(ParameterGrid(self.param_grid))\r\n        scores = []\r\n        for i, param_dict in enumerate(param_dicts):\r\n            self.estimator.set_params(**param_dict)\r\n            estimated_y_in_cv = cross_val_predict(self.estimator, x, y, cv=cross_validation,\r\n                                                  n_jobs=self.n_jobs, verbose=self.verbose,\r\n                                                  pre_dispatch=self.pre_dispatch)\r\n            if is_classifier(self.estimator):\r\n                score = accuracy_score(y, estimated_y_in_cv)\r\n            else:\r\n                score = r2_score(y, estimated_y_in_cv)\r\n            if self.display_flag:\r\n                print(i + 1, '/', len(param_dicts), '... 
' 'score :', score)\r\n scores.append(score)\r\n self.best_score_ = max(scores)\r\n self.best_index_ = scores.index(self.best_score_)\r\n self.best_params_ = param_dicts[self.best_index_]\r\n self.best_estimator_ = clone(self.estimator)\r\n self.best_estimator_.set_params(**self.best_params_)\r\n self.cv_results_ = {'params': param_dicts, 'score': scores}\r\n \r\n def predict(self, x):\r\n return self.best_estimator_.predict(x)\r\n\r\n\r\ndef midknn(x, k):\r\n \"\"\"\r\n Midpoints between k-nearest-neighbor data points (midknn)\r\n\r\n Calculate index of midknn of training dataset for validation dataset in regression\r\n\r\n Parameters\r\n ----------\r\n x: numpy.array or pandas.DataFrame\r\n (autoscaled) m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n k : int\r\n The number of neighbors\r\n\r\n Returns\r\n -------\r\n midknn_index : numpy.array\r\n indexes of two samples for midpoints between k-nearest-neighbor data points\r\n \"\"\"\r\n\r\n x = np.array(x)\r\n x_distance = distance.cdist(x, x)\r\n sample_pair_numbers = np.argsort(x_distance, axis=1)\r\n sample_pair_numbers = sample_pair_numbers[:, 1:k + 1]\r\n\r\n midknn_index = np.empty((x.shape[0] * k, 2), dtype='int64')\r\n for nearest_sample_number in range(k):\r\n midknn_index[nearest_sample_number * x.shape[0]:(nearest_sample_number + 1) * x.shape[0], 0] = \\\r\n np.arange(x.shape[0])\r\n midknn_index[nearest_sample_number * x.shape[0]:(nearest_sample_number + 1) * x.shape[0], 1] = \\\r\n sample_pair_numbers[:, nearest_sample_number]\r\n\r\n return midknn_index\r\n\r\n\r\ndef make_midknn_dataset(x, y, k):\r\n \"\"\"\r\n Midpoints between k-nearest-neighbor data points (midknn)\r\n\r\n Get dataset of midknn\r\n\r\n Parameters\r\n ----------\r\n x : numpy.array or pandas.DataFrame\r\n (autoscaled) m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n y : numpy.array or pandas.DataFrame\r\n (autoscaled) m x 1 vector of a Y-variable of training data\r\n k : int\r\n The number of neighbors\r\n\r\n Returns\r\n -------\r\n x_midknn : numpy.array\r\n x of midknn\r\n y_midknn : numpy.array\r\n y of midknn\r\n \"\"\"\r\n\r\n x = np.array(x)\r\n y = np.array(y)\r\n midknn_index = midknn(x, k) # generate indexes of midknn\r\n x_midknn = (x[midknn_index[:, 0], :] + x[midknn_index[:, 1], :]) / 2\r\n y_midknn = (y[midknn_index[:, 0]] + y[midknn_index[:, 1]]) / 2\r\n\r\n return x_midknn, y_midknn\r\n\r\n\r\ndef double_cross_validation(gs_cv, x, y, outer_fold_number, do_autoscaling=True, random_state=None):\r\n \"\"\"\r\n Double Cross-Validation (DCV)\r\n\r\n Estimate y-values in DCV\r\n\r\n Parameters\r\n ----------\r\n gs_cv : object of GridSearchCV (sklearn.model_selection.GridSearchCV)\r\n for more details, please go to https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\r\n x : numpy.array or pandas.DataFrame\r\n m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n y : numpy.array or pandas.DataFrame\r\n m x 1 vector of a Y-variable of training data\r\n outer_fold_number : int\r\n Fold number in outer CV (fold number in inner CV is included in gs_cv)\r\n do_autoscaling : bool\r\n flag of autoscaling, if True, do autoscaling\r\n random_state : int, default None\r\n random seed, if None, random seed is not set\r\n\r\n Returns\r\n -------\r\n estimated_y : numpy.array\r\n 
estimated y-values in DCV\r\n \"\"\"\r\n\r\n x = np.array(x)\r\n y = np.array(y)\r\n\r\n # how to divide datase in outer CV\r\n min_number = math.floor(x.shape[0] / outer_fold_number)\r\n mod_number = x.shape[0] - min_number * outer_fold_number\r\n index = np.matlib.repmat(np.arange(1, outer_fold_number + 1, 1), 1, min_number).ravel()\r\n if mod_number != 0:\r\n index = np.r_[index, np.arange(1, mod_number + 1, 1)]\r\n if random_state != None:\r\n np.random.seed(random_state)\r\n fold_index_in_outer_cv = np.random.permutation(index)\r\n np.random.seed()\r\n\r\n estimated_y = np.zeros(len(y))\r\n for fold_number_in_outer_cv in np.arange(1, outer_fold_number + 1, 1):\r\n print(fold_number_in_outer_cv, '/', outer_fold_number)\r\n # divide training data and test data\r\n x_train = x[fold_index_in_outer_cv != fold_number_in_outer_cv, :].copy()\r\n y_train = y[fold_index_in_outer_cv != fold_number_in_outer_cv].copy()\r\n x_test = x[fold_index_in_outer_cv == fold_number_in_outer_cv, :].copy()\r\n # shuffle samples\r\n if random_state != -999:\r\n np.random.seed(random_state)\r\n random_numbers = np.random.permutation(np.arange(x_train.shape[0]))\r\n x_train = x_train[random_numbers, :]\r\n y_train = y_train[random_numbers]\r\n np.random.seed()\r\n # autoscaling\r\n if do_autoscaling:\r\n autoscaled_x_train = (x_train - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)\r\n autoscaled_y_train = (y_train - y_train.mean()) / y_train.std(ddof=1)\r\n autoscaled_x_test = (x_test - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)\r\n else:\r\n autoscaled_x_train = x_train.copy()\r\n autoscaled_y_train = y_train.copy()\r\n autoscaled_x_test = x_test.copy()\r\n # inner CV\r\n gs_cv.fit(autoscaled_x_train, autoscaled_y_train)\r\n # modeling\r\n model = getattr(gs_cv, 'estimator')\r\n hyperparameters = list(gs_cv.best_params_.keys())\r\n for hyperparameter in hyperparameters:\r\n setattr(model, hyperparameter, gs_cv.best_params_[hyperparameter])\r\n model.fit(autoscaled_x_train, autoscaled_y_train)\r\n # prediction\r\n estimated_y_test = np.ndarray.flatten(model.predict(autoscaled_x_test))\r\n if do_autoscaling:\r\n estimated_y_test = estimated_y_test * y_train.std(ddof=1) + y_train.mean()\r\n\r\n estimated_y[fold_index_in_outer_cv == fold_number_in_outer_cv] = estimated_y_test # 格納\r\n\r\n return estimated_y\r\n\r\ndef double_cross_validation_group(gs_cv, x, y, groups, outer_fold_number, do_autoscaling=True, random_state=None):\r\n \"\"\"\r\n Double Cross-Validation (DCV) with groups\r\n Train and test are randomly selected according to a third-party provided group,\r\n\r\n Estimate y-values in DCV\r\n\r\n Parameters\r\n ----------\r\n gs_cv : object of GridSearchCV (sklearn.model_selection.GridSearchCV)\r\n for more details, please go to https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\r\n x : numpy.array or pandas.DataFrame\r\n m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n y : numpy.array or pandas.DataFrame\r\n m x 1 vector of a Y-variable of training data\r\n groups : numpy.array or pandas.DataFrame\r\n m x 1 vector of group ID of training data\r\n outer_fold_number : int\r\n Fold number in outer CV (fold number in inner CV is included in gs_cv)\r\n do_autoscaling : bool\r\n flag of autoscaling, if True, do autoscaling\r\n random_state : int, default None\r\n random seed, if None, random seed is not set\r\n\r\n Returns\r\n -------\r\n estimated_y : numpy.array\r\n estimated 
y-values in DCV\r\n \"\"\"\r\n\r\n x = np.array(x)\r\n y = np.array(y)\r\n groups = np.array(groups)\r\n \r\n unique_groups = np.array(list(set(groups)))\r\n # how to divide datase in outer CV\r\n kf = KFold(n_splits=outer_fold_number, shuffle=True, random_state=random_state)\r\n estimated_y = np.zeros(len(y))\r\n fold_number_in_outer_cv = 0\r\n for train_group_idx, test_group_idx in kf.split(unique_groups):\r\n fold_number_in_outer_cv += 1\r\n print(fold_number_in_outer_cv, '/', outer_fold_number)\r\n train_group_numbers, test_group_numbers = unique_groups[train_group_idx], unique_groups[test_group_idx]\r\n # group to sample number\r\n train_sample_numbers = np.array([], dtype=np.int64)\r\n for i in train_group_numbers:\r\n numbers = np.where(groups == i)[0]\r\n if len(numbers):\r\n train_sample_numbers = np.r_[train_sample_numbers, numbers]\r\n test_sample_numbers = np.array([], dtype=np.int64)\r\n for i in test_group_numbers:\r\n numbers = np.where(groups == i)[0]\r\n if len(numbers):\r\n test_sample_numbers = np.r_[test_sample_numbers, numbers]\r\n\r\n # divide training data and test data\r\n x_train = x[train_sample_numbers, :].copy()\r\n y_train = y[train_sample_numbers].copy()\r\n x_test = x[test_sample_numbers, :].copy()\r\n # shuffle samples\r\n if random_state != -999:\r\n np.random.seed(random_state)\r\n random_numbers = np.random.permutation(np.arange(x_train.shape[0]))\r\n x_train = x_train[random_numbers, :]\r\n y_train = y_train[random_numbers]\r\n np.random.seed()\r\n # autoscaling\r\n if do_autoscaling:\r\n autoscaled_x_train = (x_train - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)\r\n autoscaled_y_train = (y_train - y_train.mean()) / y_train.std(ddof=1)\r\n autoscaled_x_test = (x_test - x_train.mean(axis=0)) / x_train.std(axis=0, ddof=1)\r\n else:\r\n autoscaled_x_train = x_train.copy()\r\n autoscaled_y_train = y_train.copy()\r\n autoscaled_x_test = x_test.copy()\r\n # inner CV\r\n gs_cv.fit(autoscaled_x_train, autoscaled_y_train)\r\n # modeling\r\n model = getattr(gs_cv, 'estimator')\r\n hyperparameters = list(gs_cv.best_params_.keys())\r\n for hyperparameter in hyperparameters:\r\n setattr(model, hyperparameter, gs_cv.best_params_[hyperparameter])\r\n model.fit(autoscaled_x_train, autoscaled_y_train)\r\n # prediction\r\n estimated_y_test = np.ndarray.flatten(model.predict(autoscaled_x_test))\r\n if do_autoscaling:\r\n estimated_y_test = estimated_y_test * y_train.std(ddof=1) + y_train.mean()\r\n\r\n estimated_y[test_sample_numbers] = estimated_y_test # 格納\r\n\r\n return estimated_y\r\n\r\n\r\ndef y_randomization(model, x, y, do_autoscaling=True, random_state=None):\r\n \"\"\"\r\n y-randomization\r\n \r\n Estimated y-values after shuffling y-values of dataset without hyperparameters\r\n\r\n Parameters\r\n ----------\r\n model : model in sklearn before fitting\r\n x : numpy.array or pandas.DataFrame\r\n m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n y : numpy.array or pandas.DataFrame\r\n m x 1 vector of a Y-variable of training data\r\n do_autoscaling : bool\r\n flag of autoscaling, if True, do autoscaling\r\n random_state : int\r\n random seed, if None, random seed is not set\r\n\r\n Returns\r\n -------\r\n y_shuffle : numpy.array\r\n k x 1 vector of shuffled y-values of training data\r\n estimated_y_shuffle : numpy.array\r\n k x 1 vector of shuffled y-values of randomized training data\r\n \"\"\"\r\n\r\n x = np.array(x)\r\n y = np.array(y)\r\n\r\n if random_state != None:\r\n 
np.random.seed(random_state)\r\n y_shuffle = np.random.permutation(y)\r\n if do_autoscaling:\r\n autoscaled_x = (x - x.mean(axis=0)) / x.std(axis=0, ddof=1)\r\n autoscaled_y_shuffle = (y_shuffle - y_shuffle.mean()) / y_shuffle.std(ddof=1)\r\n else:\r\n autoscaled_x = x.copy()\r\n autoscaled_y_shuffle = y_shuffle.copy()\r\n\r\n model.fit(autoscaled_x, autoscaled_y_shuffle)\r\n estimated_y_shuffle = np.ndarray.flatten(model.predict(autoscaled_x))\r\n if do_autoscaling:\r\n estimated_y_shuffle = estimated_y_shuffle * y_shuffle.std(ddof=1) + y_shuffle.mean()\r\n\r\n return y_shuffle, estimated_y_shuffle\r\n\r\n\r\ndef y_randomization_with_hyperparam_opt(gs_cv, x, y, do_autoscaling=True, random_state=None):\r\n \"\"\"\r\n y-randomization\r\n \r\n Estimated y-values after shuffling y-values of dataset with hyperparameters\r\n\r\n Parameters\r\n ----------\r\n gs_cv : object of GridSearchCV (sklearn.model_selection.GridSearchCV)\r\n for more details, please go to https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\r\n x : numpy.array or pandas.DataFrame\r\n m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n y : numpy.array or pandas.DataFrame\r\n m x 1 vector of a Y-variable of training data\r\n do_autoscaling : bool\r\n flag of autoscaling, if True, do autoscaling\r\n random_state : int\r\n random seed, if None, random seed is not set\r\n\r\n Returns\r\n -------\r\n y_shuffle : numpy.array\r\n k x 1 vector of randomized y-values of training data\r\n estimated_y_shuffle : numpy.array\r\n k x 1 vector of estimated y-values of randomized training data\r\n \"\"\"\r\n\r\n x = np.array(x)\r\n y = np.array(y)\r\n\r\n if random_state != None:\r\n np.random.seed(random_state)\r\n y_shuffle = np.random.permutation(y)\r\n if do_autoscaling:\r\n autoscaled_x = (x - x.mean(axis=0)) / x.std(axis=0, ddof=1)\r\n autoscaled_y_shuffle = (y_shuffle - y_shuffle.mean()) / y_shuffle.std(ddof=1)\r\n else:\r\n autoscaled_x = x.copy()\r\n autoscaled_y_shuffle = y_shuffle.copy()\r\n\r\n # hyperparameter optimiation with cross-validation\r\n gs_cv.fit(autoscaled_x, autoscaled_y_shuffle)\r\n # modeling\r\n model = getattr(gs_cv, 'estimator')\r\n hyperparameters = list(gs_cv.best_params_.keys())\r\n for hyperparameter in hyperparameters:\r\n setattr(model, hyperparameter, gs_cv.best_params_[hyperparameter])\r\n\r\n model.fit(autoscaled_x, autoscaled_y_shuffle)\r\n estimated_y_shuffle = np.ndarray.flatten(model.predict(autoscaled_x))\r\n if do_autoscaling:\r\n estimated_y_shuffle = estimated_y_shuffle * y_shuffle.std(ddof=1) + y_shuffle.mean()\r\n\r\n return y_shuffle, estimated_y_shuffle\r\n\r\n\r\ndef mae_cce(gs_cv, x, y, number_of_y_randomization=30, do_autoscaling=True, random_state=None):\r\n \"\"\"\r\n Chance Correlation‐Excluded Mean Absolute Error (MAEcce)\r\n \r\n Calculate MAEcce\r\n\r\n Parameters\r\n ----------\r\n gs_cv : object of GridSearchCV (sklearn.model_selection.GridSearchCV)\r\n for more details, please go to https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\r\n x : numpy.array or pandas.DataFrame\r\n m x n matrix of X-variables of training data,\r\n m is the number of training sammples and\r\n n is the number of X-variables\r\n y : numpy.array or pandas.DataFrame\r\n m x 1 vector of a Y-variable of training data\r\n number_of_y_randomization : int, default 30\r\n number of y_randomization\r\n do_autoscaling : bool\r\n flag of autoscaling, if 
True, do autoscaling\r\n    random_state : int\r\n        random seed, if None, random seed is not set\r\n\r\n    Returns\r\n    -------\r\n    mae_cce : numpy.array\r\n        values of MAEcce\r\n    \"\"\"\r\n\r\n    x = np.array(x)\r\n    y = np.array(y)\r\n\r\n    # general analysis\r\n    if do_autoscaling:\r\n        autoscaled_x = (x - x.mean(axis=0)) / x.std(axis=0, ddof=1)\r\n        autoscaled_y = (y - y.mean()) / y.std(ddof=1)\r\n    else:\r\n        autoscaled_x = x.copy()\r\n        autoscaled_y = y.copy()\r\n    # hyperparameter optimization with cross-validation\r\n    gs_cv.fit(autoscaled_x, autoscaled_y)\r\n    # modeling\r\n    model = getattr(gs_cv, 'estimator')\r\n    hyperparameters = list(gs_cv.best_params_.keys())\r\n    for hyperparameter in hyperparameters:\r\n        setattr(model, hyperparameter, gs_cv.best_params_[hyperparameter])\r\n\r\n    model.fit(autoscaled_x, autoscaled_y)\r\n    estimated_y = np.ndarray.flatten(model.predict(autoscaled_x))\r\n    if do_autoscaling:\r\n        estimated_y = estimated_y * y.std(ddof=1) + y.mean()\r\n    mae_train = float(sum(abs(y - estimated_y)) / len(y))\r\n    mae_mean = float(sum(abs(y - y.mean())) / len(y))\r\n\r\n    # y-randomization\r\n    mae_yrand = []\r\n    for y_randomization_number in range(number_of_y_randomization):\r\n        if random_state is not None:\r\n            np.random.seed(random_state + y_randomization_number + 1)\r\n        y_rand = np.random.permutation(y)\r\n        if do_autoscaling:\r\n            autoscaled_y_rand = (y_rand - y_rand.mean()) / y_rand.std(ddof=1)\r\n        else:\r\n            autoscaled_y_rand = y_rand.copy()\r\n        # hyperparameter optimization with cross-validation\r\n        gs_cv.fit(autoscaled_x, autoscaled_y_rand)\r\n        # modeling\r\n        model = getattr(gs_cv, 'estimator')\r\n        hyperparameters = list(gs_cv.best_params_.keys())\r\n        for hyperparameter in hyperparameters:\r\n            setattr(model, hyperparameter, gs_cv.best_params_[hyperparameter])\r\n        model.fit(autoscaled_x, autoscaled_y_rand)\r\n        estimated_y_rand = np.ndarray.flatten(model.predict(autoscaled_x))\r\n        if do_autoscaling:\r\n            estimated_y_rand = estimated_y_rand * y_rand.std(ddof=1) + y_rand.mean()\r\n        mae_yrand.append(float(sum(abs(y_rand - estimated_y_rand)) / len(y_rand)))\r\n\r\n    mae_cce = mae_train + mae_mean - np.array(mae_yrand)\r\n\r\n    return mae_cce\r\n\r\ndef train_test_split_group(\r\n    *arrays,\r\n    test_size=None,\r\n    train_size=None,\r\n    groups=None,\r\n    random_state=None,\r\n    shuffle=True,\r\n    stratify=None,\r\n):\r\n\r\n    \"\"\"\r\n    Split arrays or matrices into random train and test subsets according to a user-provided group assignment,\r\n    which is similar to train_test_split in scikit-learn https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\r\n    \r\n    Parameters\r\n    ----------\r\n    Parameters are basically the same as the ones in train_test_split, https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\r\n    \r\n    groups : numpy.array or pandas.DataFrame\r\n        m x 1 vector of group ID\r\n    \"\"\"\r\n\r\n    n_arrays = len(arrays)\r\n    if n_arrays == 0:\r\n        raise ValueError(\"At least one array required as input\")\r\n\r\n    arrays = indexable(*arrays)\r\n    \r\n    if groups is not None:\r\n        n_samples = len(set(groups))\r\n    else:\r\n        n_samples = _num_samples(arrays[0])\r\n    n_train, n_test = _validate_shuffle_split(\r\n        n_samples, test_size, train_size, default_test_size=0.25\r\n    )\r\n\r\n    if shuffle is False:\r\n        if stratify is not None:\r\n            raise ValueError(\r\n                \"Stratified train/test split is not implemented for shuffle=False\"\r\n            )\r\n\r\n        train = np.arange(n_train)\r\n        test = np.arange(n_train, n_train + n_test)\r\n\r\n    else:\r\n        if 
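# A minimal illustrative sketch of calling the mae_cce function defined above.
# `x` and `y` are assumed placeholders for a numeric feature matrix and target
# vector, and SVR with this small grid is a hypothetical choice, not part of
# the original file:
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
gs_cv = GridSearchCV(SVR(kernel='rbf'), {'C': [0.1, 1, 10]}, cv=5)
mae_cce_values = mae_cce(gs_cv, x, y, number_of_y_randomization=30, random_state=0)
# Since mae_cce = mae_train + mae_mean - mae_yrand, larger values suggest the
# model exploits more chance correlation.
print(mae_cce_values.mean())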
groups is not None:\r\n print()\r\n cv = GroupShuffleSplit(test_size=n_test, train_size=n_train, random_state=random_state)\r\n train, test = next(cv.split(X=arrays[0], y=stratify, groups=groups))\r\n else:\r\n if stratify is not None:\r\n CVClass = StratifiedShuffleSplit\r\n else:\r\n CVClass = ShuffleSplit\r\n cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state)\r\n train, test = next(cv.split(X=arrays[0], y=stratify))\r\n \r\n\r\n return list(\r\n chain.from_iterable(\r\n (_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays\r\n )\r\n ) \r\n \r\n","repo_name":"hkaneko1985/dcekit","sub_path":"dcekit/validation/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":23177,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"7371827031","text":"import requests, os\nimport numpy as np\nimport pandas as pd\nimport extract_earth_chem\nfrom parameters import parameters\nimport pygplates\nimport Utils\n\ndef run():\n if not os.path.isdir(Utils.get_coreg_input_dir()):\n os.mkdir(Utils.get_coreg_input_dir())\n \n earth_chem_file = 'EarthChem_all.csv'\n polygon_points = Utils.get_region_of_interest_polygon().values.flatten()\n deposit_points = extract_earth_chem.query(earth_chem_file, 'AU', polygon_points)\n mesh_points = Utils.get_mesh_points(polygon_points)\n\n #first, let's find plate id for those deposits\n static_polygons = pygplates.FeatureCollection(parameters['static_polygons_file'])\n rotation_model = pygplates.RotationModel(Utils.get_files(parameters['rotation_files']))\n plate_ids = Utils.get_plate_id(deposit_points.LONGITUDE.tolist(), deposit_points.LATITUDE.tolist(), \n static_polygons, rotation_model)\n\n deposit_points.rename(columns = {'LONGITUDE':'lon', 'AGE':'age', 'LATITUDE':'lat'}, inplace = True) \n deposit_points.age = np.round(deposit_points.age)\n deposit_points = deposit_points.astype({\"age\": int}) \n deposit_points = deposit_points[['lon', 'lat', 'age']]\n deposit_points['plate_id'] = plate_ids\n\n start_time = parameters['time']['start'] \n end_time = parameters['time']['end']\n time_step = parameters['time']['step']\n\n deposit_points = deposit_points[deposit_points['age']>start_time]\n deposit_points = deposit_points[deposit_points['age'])\n output = tokenizer.decode(output_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)\n print_f(client_ip, f\" out: {output}\")\n\n # Append to history\n history.append((input, output))\n # print(f\"history: {history}\")\n \n return history, history\n\ncss = \"\"\"\n #row_bot{width: 70%; height: var(--size-96); margin: 0 auto}\n #row_bot .block{background: var(--color-grey-100); height: 100%}\n #row_input{width: 70%; margin: 0 auto}\n #row_input .block{background: var(--color-grey-100)}\n\n @media screen and (max-width: 768px) {\n #row_bot{width: 100%; height: var(--size-96); margin: 0 auto}\n #row_bot .block{background: var(--color-grey-100); height: 100%}\n #row_input{width: 100%; margin: 0 auto}\n #row_input .block{background: var(--color-grey-100)} \n }\n \"\"\"\nblock = gr.Blocks(css=css, title=\"Funny Bot\")\n\nwith block:\n gr.Markdown(\n \"\"\"

😜

\"\"\")\n with gr.Row(elem_id='row_bot'):\n chatbot = gr.Chatbot()\n with gr.Row(elem_id='row_input'):\n message = gr.Textbox(placeholder=\"Enter something\")\n state = gr.State([])\n\n message.submit(submit_chat, inputs=[\n message, state], outputs=[chatbot, state])\n message.submit(lambda x: \"\", message, message)\n\n# # Params ex: debug=True, share=True, server_name=\"0.0.0.0\", server_port=5050\nblock.launch(debug=True)","repo_name":"dangduytung/chatbot-DiabloGPT","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1845916142","text":"import typing as tp\n\nimport numpy as np\n\nfrom filterpy.kalman import KalmanFilter\n\n\nDELTA_T = 1 / 30\nSIGMA_U = 20 * 500\nSIGMA_Z = 20\nF = [\n [1, DELTA_T],\n [0, 1],\n]\nH = [\n [1, 0],\n]\nP = [\n [20 ** 2, 0],\n [0, 0.1 ** 2],\n]\nR = [[1]]\nQ = [\n [1/4 * DELTA_T ** 2, 1/2 * DELTA_T],\n [1/2 * DELTA_T, 1],\n]\n\n\ndef init_filter(init_value: tp.Optional[float] = None) -> KalmanFilter:\n kf = KalmanFilter(dim_x=2, dim_z=1, dim_u=0)\n\n kf.x = np.array([\n [init_value or 0.0],\n [0.0],\n ])\n kf.F = np.array(F)\n kf.H = np.array(H)\n kf.P = np.array(P)\n kf.R = np.array(R) * (SIGMA_Z ** 2)\n kf.Q = np.array(Q) * (DELTA_T ** 2) * (SIGMA_U ** 2)\n return kf\n\n\ndef md_sigma(md: float) -> float:\n return 1 + 1 * md ** 2\n\n\nclass KalmanFilters:\n def __init__(\n self,\n width: int,\n height: int,\n intrinsic: np.ndarray\n ) -> None:\n self.width = width\n self.height = height\n self.intrinsic = intrinsic\n\n self.filters = None\n\n def init_filters(\n self,\n world_points: np.ndarray,\n ) -> None:\n if world_points.ndim > 1:\n world_points = world_points.reshape(-1)\n self.filters = [init_filter(value) for value in world_points]\n\n def make_filtering(\n self,\n mp_points: np.ndarray,\n world_points: np.ndarray,\n ) -> np.ndarray:\n if self.filters is None:\n raise Exception('Filters is not initialized!')\n \n if mp_points.ndim > 1:\n mp_points = mp_points.reshape(-1)\n if world_points.ndim > 1:\n world_points = world_points.reshape(-1)\n\n md: float\n output = np.zeros_like(mp_points)\n for j, new_point in enumerate(world_points[2::3]):\n kf_z = self.filters[3*j+2]\n kf_z.predict()\n\n z_res = kf_z.residual_of(new_point)[0, 0]\n md = np.sqrt((z_res * z_res) / kf_z.P[0, 0])\n kf_z.R = np.array(R) * ((md_sigma(md) * SIGMA_Z) ** 2)\n\n if new_point > 50:\n kf_z.update(new_point)\n\n output[3*j+2] = kf_z.x[0, 0]\n\n new_x = (mp_points[3*j+0] * self.width - self.principal_x) / self.focal_x * output[3*j+2]\n new_y = (mp_points[3*j+1] * self.height - self.principal_y) / self.focal_y * output[3*j+2]\n\n kf_x = self.filters[3*j+0]\n kf_y = self.filters[3*j+1]\n kf_x.predict()\n kf_y.predict()\n\n x_res = kf_x.residual_of(new_x)[0, 0]\n md = np.sqrt((x_res * x_res) / kf_x.P[0, 0])\n kf_x.R = np.array(R) * ((md_sigma(md) * SIGMA_Z) ** 2)\n\n y_res = kf_y.residual_of(new_y)[0, 0]\n md = np.sqrt((y_res * y_res) / kf_y.P[0, 0])\n kf_y.R = np.array(R) * ((md_sigma(md) * SIGMA_Z) ** 2)\n\n kf_x.update(new_x)\n kf_y.update(new_y)\n\n output[3*j+0] = kf_x.x[0, 0]\n output[3*j+1] = kf_y.x[0, 0]\n return output\n\n @property\n def focal_x(self) -> float:\n return self.intrinsic[0, 0]\n\n @property\n def focal_y(self) -> float:\n return self.intrinsic[1, 1]\n\n @property\n def principal_x(self) -> float:\n return self.intrinsic[0, 2]\n\n @property\n def principal_y(self) -> float:\n return self.intrinsic[1, 
2]\n","repo_name":"GiveMeTheReason/gestures-ros2","sub_path":"mediapipe_extractor/scripts/kalman_filter.py","file_name":"kalman_filter.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13453852394","text":"# JD.com product page crawler: Xiaomi Mi 9 phone\n# https://item.jd.com/7437708.html\n\nimport requests\nurl = 'https://item.jd.com/7437708.html'\ntry:\n    r = requests.get(url)\n    r.raise_for_status()\n    r.encoding = r.apparent_encoding\n    print(r.text[:1000])  # the amount of returned content can be adjusted here\nexcept Exception:\n    print('crawl failed')\n\n\n","repo_name":"Liangkang19/PyCharmFile","sub_path":"SpiderTest/TomSpider/Requests/JDSpider.py","file_name":"JDSpider.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7076448984","text":"import labrad\nfrom Qsim.scripts.experiments.qsimexperiment import QsimExperiment\nfrom Qsim.scripts.pulse_sequences.metastable_three_preparation import metastable_three_preparation as metastable_bright_sequence # this is where I define which state is bright\nfrom Qsim.scripts.pulse_sequences.metastable_four_preparation import metastable_four_preparation as metastable_dark_sequence\n\nimport numpy as np\nfrom labrad.units import WithUnit as U\n\n\nclass metastable_fidelity_tweak_up(QsimExperiment):\n    \"\"\"\n    \n    \"\"\"\n\n    name = 'Metastable Fidelity Tweak Up'\n    \n    exp_parameters = []\n\n    exp_parameters.append(('ShelvingStateDetection', 'repetitions'))\n    exp_parameters.append(('ShelvingStateDetection', 'state_readout_threshold'))\n\n    exp_parameters.extend(metastable_bright_sequence.all_required_parameters())\n    exp_parameters.extend(metastable_dark_sequence.all_required_parameters())\n\n    def initialize(self, cxn, context, ident):\n        self.ident = ident\n        self.pulser = cxn.pulser\n        self.context = context\n\n    def run(self, cxn, context):\n\n        self.p['Line_Selection.qubit'] = 'qubit_0'\n        self.p['MicrowaveInterrogation.duration'] = self.p.Pi_times.qubit_0\n        self.p['MicrowaveInterrogation.detuning'] = U(0.0, 'kHz')\n\n        self.p['Metastable_Microwave_Interrogation.duration'] = self.p.Pi_times.metastable_qubit\n        self.p['Metastable_Microwave_Interrogation.detuning'] = U(0.0, 'kHz')\n\n        self.p['Modes.state_detection_mode'] = 'Shelving'\n\n        self.setup_prob_datavault()\n\n        i = 0\n        while True:\n            i += 1\n\n            should_break = self.update_progress(np.random.random())\n            if should_break:\n                break\n\n            # programs and runs the bright state sequence, then creates an array with exp number, detection\n            # counts, and doppler counts to be saved to datavault\n            self.program_pulser(metastable_bright_sequence)\n            [counts_doppler_bright, counts_herald_bright_1, counts_herald_bright_2, counts_bright] = self.run_sequence(max_runs=250, num=4)\n            failed_four_heralding_bright = np.where(counts_herald_bright_1 >= self.p.ShelvingStateDetection.state_readout_threshold)\n            failed_three_heralding_bright = np.where(counts_herald_bright_2 >= self.p.ShelvingStateDetection.state_readout_threshold)\n            doppler_errors_bright = np.where(counts_doppler_bright <= self.p.Shelving_Doppler_Cooling.doppler_counts_threshold)\n            all_bright_errors = np.unique(np.concatenate((failed_three_heralding_bright[0], failed_four_heralding_bright[0], doppler_errors_bright[0])))\n            counts_bright_fixed = np.delete(counts_bright, all_bright_errors)\n\n\n            self.program_pulser(metastable_dark_sequence)\n            [counts_doppler_dark, counts_herald_dark, counts_dark] = self.run_sequence(max_runs=333, num=3)\n            failed_heralding_dark 
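# Illustrative sketch of the adaptive measurement-noise gate used in the Kalman
# filter record above; r_eff is a hypothetical helper mirroring how md_sigma
# rescales R from the Mahalanobis distance of the residual:
def r_eff(residual: float, p00: float, sigma_z: float = 20.0) -> float:
    md = abs(residual) / (p00 ** 0.5)       # Mahalanobis distance of the residual
    return ((1 + md ** 2) * sigma_z) ** 2   # same scaling as R * (md_sigma(md) * SIGMA_Z)**2

print(r_eff(0.0, 400.0))    # 400.0   -> measurement fully trusted
print(r_eff(60.0, 400.0))   # 40000.0 -> outlier strongly down-weighted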
= np.where(counts_herald_dark >= self.p.ShelvingStateDetection.state_readout_threshold)\n            doppler_errors_dark = np.where(counts_doppler_dark <= self.p.Shelving_Doppler_Cooling.doppler_counts_threshold)\n            all_dark_errors = np.unique(np.concatenate((failed_heralding_dark[0], doppler_errors_dark[0])))\n            counts_dark_fixed = np.delete(counts_dark, all_dark_errors)\n            \n\n            # this processes the counts and calculates the fidelity and plots it on the bottom panel\n            self.plot_prob(i, counts_dark_fixed, counts_bright_fixed)\n\n            # process the count_bins and return the histogram with bins and photon counts/bin\n            hist_bright = self.process_data(counts_bright_fixed)\n            hist_dark = self.process_data(counts_dark_fixed)\n\n            # this part plots the histograms on the hist panel in the shelving_fidelity tab\n            self.plot_hist(hist_bright, folder_name='Metastable_Bright_Histogram')\n            self.plot_hist(hist_dark, folder_name='Metastable_Dark_Histogram')\n\n\n\n    def plot_prob(self, num, counts_dark, counts_bright):\n        prob_dark = self.get_pop(counts_dark)\n        prob_bright = self.get_pop(counts_bright)\n        self.dv.add(num, prob_dark, prob_bright,\n                    prob_bright - prob_dark, context=self.prob_context)\n\n    def setup_prob_datavault(self):\n        self.prob_context = self.dv.context()\n        self.dv.cd(['', 'Metastable_Fidelity'], True, context=self.prob_context)\n\n        self.dataset_prob = self.dv.new('metastable_fidelity', [('run', 'prob')],\n                                        [('Prob', 'bright_prep', 'num'),\n                                         ('Prob', 'dark_prep', 'num'),\n                                         ('Prob', 'contrast', 'num')], context=self.prob_context)\n        self.grapher.plot(self.dataset_prob, 'Fidelity', False)\n        for parameter in self.p:\n            self.dv.add_parameter(parameter, self.p[parameter], context=self.prob_context)\n\n    def finalize(self, cxn, context):\n        pass\n\nif __name__ == '__main__':\n    cxn = labrad.connect()\n    scanner = cxn.scriptscanner\n    exprt = metastable_fidelity_tweak_up(cxn=cxn)\n    ident = scanner.register_external_launch(exprt.name)\n    exprt.execute(ident)\n","repo_name":"johnpalsberg/John-Palsberg","sub_path":"QsimMaster/scripts/experiments/Metastable_Fidelity_Tweak_Up/metastable_fidelity_tweak_up.py","file_name":"metastable_fidelity_tweak_up.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20572423601","text":"\"\"\"\nwhile i * i <= n: why do we attach this condition?\nIt exploits the fact that if n has a nontrivial divisor, its smallest one can never exceed the square root of n.\nOnce i grows past the square root of n, no remaining value can divide n, so the loop terminates.\nThis keeps the number of loop iterations to a minimum and reduces the time complexity.\n\"\"\"\ndef solution(n):\n    i = 2  # prime candidates start from 2\n    factors = set()  # use a set rather than a list, since duplicate factors must be removed\n    while i * i <= n:  # stop once i exceeds the square root of n\n        if n % i:\n            i += 1\n        else:\n            n //= i  # n becomes the integer quotient of dividing by i\n            factors.add(i)  # store it in the factors set\n    if n > 1:  # whatever remains above 1 is itself a prime factor, so add it\n        factors.add(n)\n    return sorted(list(factors))\n\n","repo_name":"HeeSeok-kim/algorithm_Study","sub_path":"day 12/SeungTae/소인수분해.py","file_name":"소인수분해.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4627139423","text":"from logging import getLogger\nimport warnings\n\nimport chainer\nimport chainer.functions as F\nimport numpy as np\n\nimport chainerrl\nfrom chainerrl import agent\nfrom chainerrl.recurrent import Recurrent\n\n\nclass REINFORCE(agent.AttributeSavingMixin, agent.Agent):\n    \"\"\"Williams's episodic REINFORCE.\n\n    Args:\n        model (Policy): Model to train. 
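# Worked example for the `while i * i <= n` bound explained in the docstring
# above; this calls the solution() just defined:
# n = 84 = 2*2*3*7: the loop divides out 2 and 3 while i <= sqrt(n), and the
# leftover n = 7 > 1 is added at the end.
assert solution(84) == [2, 3, 7]
assert solution(13) == [13]  # prime input: the loop never divides, n itself is kept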
It must be a callable that accepts\n            observations as input and returns action distributions\n            (Distribution).\n        optimizer (chainer.Optimizer): optimizer used to train the model\n        beta (float): Weight coefficient for the entropy regularization term.\n        normalize_loss_by_steps (bool): If set true, losses are normalized by\n            the number of steps taken to accumulate the losses\n        act_deterministically (bool): If set true, choose most probable actions\n            in act method.\n        batchsize (int): Number of episodes used for each update\n        backward_separately (bool): If set true, call backward separately for\n            each episode and accumulate only gradients.\n        average_entropy_decay (float): Decay rate of average entropy. Used only\n            to record statistics.\n        batch_states (callable): Method which makes a batch of observations.\n            default is `chainerrl.misc.batch_states`\n        logger (logging.Logger): Logger to be used.\n    \"\"\"\n\n    saved_attributes = ['model', 'optimizer']\n\n    def __init__(self, model, optimizer,\n                 beta=0,\n                 phi=lambda x: x,\n                 batchsize=1,\n                 act_deterministically=False,\n                 average_entropy_decay=0.999,\n                 backward_separately=False,\n                 batch_states=chainerrl.misc.batch_states,\n                 logger=None):\n\n        self.model = model\n        self.xp = self.model.xp\n        self.optimizer = optimizer\n        self.beta = beta\n        self.phi = phi\n        self.batchsize = batchsize\n        self.backward_separately = backward_separately\n        self.act_deterministically = act_deterministically\n        self.average_entropy_decay = average_entropy_decay\n        self.batch_states = batch_states\n        self.logger = logger or getLogger(__name__)\n\n        # Statistics\n        self.average_entropy = 0\n\n        self.t = 0\n        self.reward_sequences = [[]]\n        self.log_prob_sequences = [[]]\n        self.entropy_sequences = [[]]\n        self.n_backward = 0\n\n    def act_and_train(self, obs, reward):\n\n        batch_obs = self.batch_states([obs], self.xp, self.phi)\n        action_distrib = self.model(batch_obs)\n        batch_action = action_distrib.sample().array  # Do not backprop\n        action = chainer.cuda.to_cpu(batch_action)[0]\n\n        # Save values used to compute losses\n        self.reward_sequences[-1].append(reward)\n        self.log_prob_sequences[-1].append(\n            action_distrib.log_prob(batch_action))\n        self.entropy_sequences[-1].append(\n            action_distrib.entropy)\n\n        self.t += 1\n\n        self.logger.debug('t:%s r:%s a:%s action_distrib:%s',\n                          self.t, reward, action, action_distrib)\n\n        # Update stats\n        self.average_entropy += (\n            (1 - self.average_entropy_decay) *\n            (float(action_distrib.entropy.array[0]) - self.average_entropy))\n\n        return action\n\n    def act(self, obs):\n        with chainer.no_backprop_mode():\n            batch_obs = self.batch_states([obs], self.xp, self.phi)\n            action_distrib = self.model(batch_obs)\n            if self.act_deterministically:\n                return chainer.cuda.to_cpu(\n                    action_distrib.most_probable.array)[0]\n            else:\n                return chainer.cuda.to_cpu(action_distrib.sample().array)[0]\n\n    def stop_episode_and_train(self, obs, reward, done=False):\n\n        if not done:\n            warnings.warn(\n                'Since REINFORCE supports episodic environments only, '\n                'calling stop_episode_and_train with done=False will throw '\n                'away the last episode.')\n            self.reward_sequences[-1] = []\n            self.log_prob_sequences[-1] = []\n            self.entropy_sequences[-1] = []\n        else:\n            self.reward_sequences[-1].append(reward)\n            if self.backward_separately:\n                self.accumulate_grad()\n                if self.n_backward == self.batchsize:\n                    self.update_with_accumulated_grad()\n            else:\n                if len(self.reward_sequences) == self.batchsize:\n                    self.batch_update()\n                else:\n                    # Prepare for the next episode\n                    self.reward_sequences.append([])\n                    
self.log_prob_sequences.append([])\n                    self.entropy_sequences.append([])\n\n        if isinstance(self.model, Recurrent):\n            self.model.reset_state()\n\n    def accumulate_grad(self):\n        if self.n_backward == 0:\n            self.model.cleargrads()\n        # Compute losses\n        losses = []\n        for r_seq, log_prob_seq, ent_seq in zip(self.reward_sequences,\n                                                self.log_prob_sequences,\n                                                self.entropy_sequences):\n            assert len(r_seq) - 1 == len(log_prob_seq) == len(ent_seq)\n            # Convert rewards into returns (=sum of future rewards)\n            R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]\n            for R, log_prob, entropy in zip(R_seq, log_prob_seq, ent_seq):\n                loss = -R * log_prob - self.beta * entropy\n                losses.append(loss)\n        total_loss = chainerrl.functions.sum_arrays(losses)\n        # When self.batchsize is future.types.newint.newint, dividing a\n        # Variable with it will raise an error, so it is manually converted to\n        # float here.\n        total_loss /= float(self.batchsize)\n        F.squeeze(total_loss).backward()\n        self.reward_sequences = [[]]\n        self.log_prob_sequences = [[]]\n        self.entropy_sequences = [[]]\n        self.n_backward += 1\n\n    def batch_update(self):\n        assert len(self.reward_sequences) == self.batchsize\n        assert len(self.log_prob_sequences) == self.batchsize\n        assert len(self.entropy_sequences) == self.batchsize\n        # Update the model\n        assert self.n_backward == 0\n        self.accumulate_grad()\n        self.optimizer.update()\n        self.n_backward = 0\n\n    def update_with_accumulated_grad(self):\n        assert self.n_backward == self.batchsize\n        self.optimizer.update()\n        self.n_backward = 0\n\n    def stop_episode(self):\n        if isinstance(self.model, Recurrent):\n            self.model.reset_state()\n\n    def get_statistics(self):\n        return [\n            ('average_entropy', self.average_entropy),\n        ]\n","repo_name":"chainer/chainerrl","sub_path":"chainerrl/agents/reinforce.py","file_name":"reinforce.py","file_ext":"py","file_size_in_byte":6850,"program_lang":"python","lang":"en","doc_type":"code","stars":1134,"dataset":"github-code","pt":"21"} +{"seq_id":"74916028533","text":"# Searches for available free rides on Hertz between two cities\nimport urllib.request, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\nfrom inspect import cleandoc\nimport re\nimport sys\ntry:\n    stad1 = sys.argv[1]\nexcept IndexError:\n    print('Specify the arguments in the format \"hertz.py stad1 stad2\"')\n    exit()\ntry:\n    stad2 = sys.argv[2]\nexcept IndexError:\n    print('Specify the arguments in the format \"hertz.py stad1 stad2\"')\n    exit()\n# This line causes certificate errors to be ignored\nssl._create_default_https_context = ssl._create_unverified_context\n\nr = urllib.request.urlopen('https://www.hertzfreerider.se/unauth/list_transport_offer.aspx')\ndokument = BeautifulSoup(r, 'html.parser')\ntagg1 = dokument('span', id=re.compile(\"^.*(offerHeader)$\"))\ntagg2 = dokument('span', id=re.compile(\"^.*(offerDate)$\"))\ntagg3 = dokument('span', id=re.compile(\"^.*(Label1)$\"))\ntagg4 = dokument('span', id=re.compile(\"^.*(offerDescription1)$\"))\nprint(\"\\n*** The following free rides are available ***\")\ni = 0\ninif = False\nfor element in tagg1:\n    if stad1.lower() in element.text.lower() and stad2.lower() in element.text.lower():\n        print(cleandoc(tagg1[i].text))\n        print(f'Earliest pick-up: {tagg2[i].text}')\n        print(f'Latest drop-off: {tagg3[i].text}')\n        print(tagg4[i].text)\n        print(\"\\n********************\")\n        inif = True\n    i = i + 1\nif not inif:\n    print(f'There were no free rides between {stad1} and 
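# Small numeric check of the return computation in accumulate_grad above.
# r_seq[0] is the reward observed before the first action, so it is excluded;
# the values here are made up for the illustration:
import numpy as np
r_seq = [0.0, 1.0, 2.0, 3.0]
R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]
print(R_seq)  # [6. 5. 3.] -- each entry is the sum of rewards from that step on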
{stad2}.')","repo_name":"kaukasar/Python_projects","sub_path":"hertz.py","file_name":"hertz.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15706350872","text":"########################### Python 3.7 version; the autopy library does not work on newer Python versions.\r\n###########################\r\nimport cv2\r\nimport numpy as np\r\nimport MouseWithHandsLIB as mwhL\r\nimport time\r\nimport autopy\r\n###########################\r\npTime = 0\r\nszOkna, wysOkna = 640, 480\r\npX, pY = 0, 0\r\ncX, cY = 0, 0\r\nwyg = 7\r\nklatkaR = 100\r\n###########################\r\ncap = cv2.VideoCapture(0)  # camera selection; for a second camera enter \"1\"\r\ncap.set(3, szOkna)  # window width\r\ncap.set(4, wysOkna)  # window height\r\ndetector = mwhL.hDetect(maxHands=1)\r\nszEkr, wysEkr = autopy.screen.size()\r\n\r\nwhile True:\r\n    success, img = cap.read()  # read a frame from the camera\r\n    img = detector.fHands(img)\r\n    lmList, bbox = detector.fPos(img)\r\n\r\n    if len(lmList)!=0:\r\n        x1, y1 = lmList[8][1:]\r\n        x2, y2 = lmList[12][1:]\r\n\r\n        fingers = detector.fUp()\r\n        cv2.rectangle(img, (klatkaR, klatkaR), (szOkna - klatkaR, wysOkna - klatkaR), (255, 0, 255), 2)\r\n\r\n\r\n        # left mouse button click\r\n        if fingers[1] == 1 and fingers[2] == 1:\r\n            length, img, lineInfo = detector.fDist(8, 12, img)\r\n            print(length)\r\n            if length < 40:\r\n                cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 255, 0), cv2.FILLED)\r\n                autopy.mouse.click()\r\n\r\n\r\n        # cursor movement\r\n        if fingers[1]==1 and fingers[2]==0 :\r\n            x3 = np.interp(x1, (klatkaR,szOkna-klatkaR),(0,szEkr))\r\n            y3 = np.interp(y1, (klatkaR, wysOkna-klatkaR), (0, wysEkr))\r\n            cX = pX + (x3 - pX) / wyg\r\n            cY = pY + (y3 - pY) / wyg\r\n            autopy.mouse.move(szEkr - cX, cY)\r\n            cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)\r\n            pX, pY = cX, cY\r\n\r\n\r\n    # frame rate\r\n    cTime = time.time()\r\n    fps = 1/(cTime - pTime)\r\n    pTime = cTime\r\n    cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255,255,100), 2)\r\n    # display\r\n    cv2.imshow(\"Image\", img)  # window title\r\n    cv2.waitKey(1)\r\n","repo_name":"sgalek/HandControl","sub_path":"MouseWithHands.py","file_name":"MouseWithHands.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18522603422","text":"#!/usr/bin/env python3.8\n\nimport argparse\nimport os\nfrom subprocess import PIPE, run\n\ndef arg_parse ():\n    parser = argparse.ArgumentParser ()\n    parser.add_argument (\"--temperature\", \"-t\", help=\"Target temperature (i.e., 300K, 77K, or 4K)\", type=int, default=4)\n    parser.add_argument (\"--node\", \"-n\", help=\"Technology node (i.e., 45nm)\", type=int, default=45)\n    parser.add_argument (\"--vdd\", \"-d\", help=\"Supply voltage\", type=float, default=0)\n    parser.add_argument (\"--vth\", \"-r\", help=\"Threshold voltage at 300K (i.e., Vth_300k)\", type=float, default=0)\n    args = parser.parse_args ()\n    return args\n\n\ndef run_synthesis (design_names, temperature):\n    is_not_file = 0\n    is_not_ddc = 0\n    for design_name in design_names:\n        if not os.path.isfile (\"./latency_result/{}/critical_path_{}k\".format (design_name, temperature)):\n            is_not_file += 1\n        if not os.path.isfile (\"./{}_{}k.ddc\".format (design_name, temperature)):\n            is_not_ddc += 1\n\n    if is_not_file:\n        if is_not_ddc:\n            os.system (\"make dc-topo-{}k\".format (temperature))\n\n\ndef run_delay_extraction (design_names, 
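# Quick numeric illustration of the cursor smoothing in the hand-tracking
# record above: each frame the cursor moves 1/wyg of the remaining distance
# to the target (the values below are made up for the illustration):
prev, target, wyg = 0.0, 700.0, 7
for _ in range(3):
    prev = prev + (target - prev) / wyg
    print(round(prev, 1))  # 100.0, then 185.7, then 259.2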
temperature):\n # Critical path at target temperature (transistor + wire).\n is_not_file = 0\n for design_name in design_names:\n if not os.path.isfile (\"./latency_result/{}/critical_path_{}k\".format (design_name, temperature)):\n is_not_file += 1\n\n if is_not_file:\n os.system (\"make critical-{}k\".format (temperature))\n\n\ndef clean_up ():\n os.system (\"make clean\")\n\n\ndef main ():\n\n args = arg_parse ()\n temperature = args.temperature\n node = args.node\n vdd = args.vdd if args.vdd > 0 else 1.0 # Vdd of 45nm ITRS\n vth = args.vth if args.vth > 0 else 0.46893 # Vth of 45nm ITRS\n design_names = [\"drive_circuit\", \"pulse_circuit\", \"readout_rx_circuit\", \"readout_tx_circuit\"]\n\n # Input-requirement checking.\n if node != 45:\n print (\"Currently, CryoPipeline only supports 45nm.\")\n exit ()\n if not any ((temperature == key_) for key_ in [300, 77, 4]):\n print (\"Currently, CryoPipeline only supports 300K, 77K, and 4K.\")\n exit ()\n \n run_synthesis (design_names, temperature)\n run_delay_extraction (design_names, temperature)\n clean_up ()\n\n \nif __name__ == \"__main__\":\n main ()\n","repo_name":"SNU-HPCS/QIsim","sub_path":"device_model/cmos/CryoModel/CryoPipeline/logic_model.py","file_name":"logic_model.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"71259990133","text":"# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n File Name: six\n Description :\n Author : JHao\n date: 2020/6/22\n-------------------------------------------------\n Change Activity:\n 2020/6/22:\n-------------------------------------------------\n\"\"\"\n__author__ = 'JHao'\n\nimport sys\n\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n def iteritems(d, **kw):\n return iter(d.items(**kw))\nelse:\n def iteritems(d, **kw):\n return d.iteritems(**kw)\n\nif PY3:\n from urllib.parse import urlparse\nelse:\n from urlparse import urlparse\n\nif PY3:\n from imp import reload as reload_six\nelse:\n reload_six = reload\n\nif PY3:\n from queue import Empty, Queue\nelse:\n from Queue import Empty, Queue\n\n\ndef withMetaclass(meta, *bases):\n \"\"\"Create a base class with a metaclass.\"\"\"\n\n # This requires a bit of explanation: the basic idea is to make a dummy\n # metaclass for one level of class instantiation that replaces itself with\n # the actual metaclass.\n # class MetaClass(meta):\n #\n # def __new__(cls, classname, supers, attrdict):\n # return meta(classname, bases, attrdict)\n\n # return type.__new__(MetaClass, 'temporary_class', (), {})\n return meta('temporary_class', bases, {})\n\n\nif __name__ == '__main__':\n from util.singleton import Singleton\n clazz = withMetaclass(Singleton)\n clazz2 = withMetaclass(Singleton)\n instance1 = clazz()\n instance2 = clazz2()\n print(instance1 is instance2)\n # print(id(clazz))\n # print(id(clazz2))\n # print(clazz is clazz2)\n\n # a = type('A', (), {})\n # b = type('A', (), {})\n # print(a is b)","repo_name":"Bruceey/Spider_Learning","sub_path":"09、代理池/proxy_pool-master/util/six.py","file_name":"six.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2694301701","text":"import pgzrun\nimport copy\nimport random\nimport time\nimport math\nimport collections\n\npocet_pixelu = 10\nvyska = 50 #50\nsirka = 100 #100\nWIDTH = pocet_pixelu * sirka\nHEIGHT = pocet_pixelu * (vyska + 
2)\nvelikost_hada = 5\nhad_souradnice = []\nhlava_hada = []\npole = []\nsmer = [1, 1]\npoint_collected = False\nspawned = False\nkonec = False\nescaped = True\nstarted = False\nbot = True\ncesta = collections.deque()\nnacteno = 0\nsouradky_pointu = [0, 0]\nfor y in range(vyska):\n radek = []\n for x in range(sirka):\n had_a_had_hledani = [0, 0]\n radek.append(had_a_had_hledani)\n pole.append(radek)\nfor had in range(velikost_hada):\n hlava_hada = [round(vyska / 2), round(sirka / 2) + had - 2]\n had_souradnice.append(hlava_hada)\n print(hlava_hada)\nfor had in range(velikost_hada):\n pole[had_souradnice[had][0]][had_souradnice[had][1]][0] = 1\nprint(had_souradnice)\n\n\ndef pohyb():\n global hlava_hada, pole, point_collected, velikost_hada, spawned, konec, escaped\n if not bot:\n if smer[0] == 0:\n if can_move(hlava_hada[0] + smer[1], hlava_hada[1], \"pohyb\"):\n hlava_hada_neco = copy.deepcopy(hlava_hada)\n hlava_hada_neco[smer[0]] += smer[1]\n if not point_collected:\n odpad = had_souradnice.pop(0)\n pole[odpad[0]][odpad[1]][0] = 0\n point_collected = False\n had_souradnice.append(hlava_hada_neco)\n hlava_hada = copy.deepcopy(hlava_hada_neco)\n for had in range(velikost_hada):\n pole[had_souradnice[had][0]][had_souradnice[had][1]][0] = 1\n if hlava_hada == souradky_pointu:\n point_collected = True\n spawned = False\n velikost_hada += 1\n point_spawn()\n else:\n konec = True\n escaped = True\n konec_hry()\n clock.unschedule(pohyb)\n else:\n if can_move(hlava_hada[0], hlava_hada[1] + smer[1], \"pohyb\"):\n hlava_hada_neco = copy.deepcopy(hlava_hada)\n hlava_hada_neco[smer[0]] += smer[1]\n if not point_collected:\n odpad = had_souradnice.pop(0)\n pole[odpad[0]][odpad[1]][0] = 0\n point_collected = False\n had_souradnice.append(hlava_hada_neco)\n hlava_hada = copy.deepcopy(hlava_hada_neco)\n for had in range(velikost_hada):\n pole[had_souradnice[had][0]][had_souradnice[had][1]][0] = 1\n if hlava_hada == souradky_pointu:\n point_collected = True\n spawned = False\n velikost_hada += 1\n point_spawn()\n else:\n konec = True\n escaped = True\n konec_hry()\n clock.unschedule(pohyb)\n else:\n try:\n misto = cesta.pop()\n had_souradnice.append(misto)\n if not point_collected:\n odpad = had_souradnice.pop(0)\n pole[odpad[0]][odpad[1]][0] = 0\n point_collected = False\n for had in range(velikost_hada):\n pole[had_souradnice[had][0]][had_souradnice[had][1]][0] = 1\n hlava_hada = misto\n if hlava_hada == souradky_pointu:\n spawned = False\n point_spawn()\n except IndexError:\n point_collected = True\n velikost_hada += 1\n# point_spawn()\n hledani_cesty()\n\ndef can_move(y, x, kdo):\n if y < 0 or x < 0:\n return False\n if kdo == \"pohyb\":\n try:\n if pole[y][x] != 1:\n return True\n else:\n return False\n except IndexError:\n return False\n if kdo == \"smer\":\n if had_souradnice[velikost_hada - 2][0] == y and had_souradnice[velikost_hada - 2][1] == x:\n return False\n else:\n return True\n return True\n\ndef konec_hry():\n global hlava_hada, smer, point_collected, spawned, konec, escaped, started, souradky_pointu, pole, had_souradnice, velikost_hada\n velikost_hada = 5\n had_souradnice = []\n hlava_hada = []\n pole = []\n smer = [1, 1]\n point_collected = False\n spawned = False\n konec = False\n escaped = True\n started = False\n souradky_pointu = [0, 0]\n for y in range(vyska):\n radek = []\n for x in range(sirka):\n radek.append(0)\n pole.append(radek)\n for had in range(velikost_hada):\n hlava_hada = [int(vyska / 2), int(sirka / 2) + had - 2]\n had_souradnice.append(hlava_hada)\n for had in 
range(velikost_hada):\n pole[had_souradnice[had][0]][had_souradnice[had][1]][0] = 1\n\ndef point_spawn():\n global spawned, souradky_pointu\n while not spawned:\n x = random.randint(0, sirka - 1)\n y = random.randint(0, vyska - 1)\n if pole[y][x][0] == 0:\n spawned = True\n pole[y][x][0] = 2\n souradky_pointu = [y, x]\n# if bot:\n# hledani_cesty()\n\ndef hledani_cesty():\n global cesta, nacteno\n breakout = False\n na_hlave = False\n doba_trvani = -1\n nacteno = 0\n scitani = 0\n pole_z_wishe = copy.deepcopy(pole)\n neighbours = collections.deque()\n neighbours.append(hlava_hada)\n cesta = collections.deque()\n cesta.append(souradky_pointu)\n if spawned:\n while not breakout:\n doba_trvani += 1\n pozice = neighbours.popleft()\n for sx, sy in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n y = pozice[0] + sy\n x = pozice[1] + sx\n if x < 0 or x >= sirka or y < 0 or y >= vyska:\n continue\n if souradky_pointu == [y, x]:\n pole_z_wishe[y][x][1] = pole_z_wishe[pozice[0]][pozice[1]][1] + 1\n breakout = True\n if pole_z_wishe[y][x][1] == 0 and pole_z_wishe[y][x][0] != 1:\n pole_z_wishe[y][x][1] = pole_z_wishe[pozice[0]][pozice[1]][1] + 1\n if velikost_hada - 1 > pole_z_wishe[pozice[0]][pozice[1]][1]:\n pole_z_wishe[had_souradnice[pole_z_wishe[pozice[0]][pozice[1]][1]][0]][had_souradnice[pole_z_wishe[pozice[0]][pozice[1]][1]][1]][0] = 0\n neighbours.append([y, x])\n\n while not na_hlave:\n pozice = cesta.pop()\n cesta.append(pozice)\n scitani += 1\n if scitani == vyska * sirka:\n na_hlave = True\n for sx, sy in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n y = pozice[0] + sy\n x = pozice[1] + sx\n if x < 0 or x >= sirka or y < 0 or y >= vyska:\n continue\n if pole_z_wishe[y][x][1] == pole_z_wishe[pozice[0]][pozice[1]][1] - 1:\n pole_z_wishe[pozice[0]][pozice[1]][1] = 0\n cesta.append([y, x])\n if y == hlava_hada[0] and x == hlava_hada[1]:\n na_hlave = True\n\n\n for y in range(vyska):\n for x in range(sirka):\n if pole[y][x][1] != 0:\n pole[y][x][1] = 0\n cesta.appendleft(souradky_pointu)\n je_toto_hlava = cesta.pop()\n if not je_toto_hlava == hlava_hada:\n cesta.append(je_toto_hlava)\n print(souradky_pointu, cesta, hlava_hada)\n\n\ndef on_key_down(key):\n global smer, escaped, aktualnicas, casovac, started\n if escaped:\n if key == 32:\n casovac = time.time()\n if started:\n casovac -= aktualnicas\n if not started:\n point_spawn()\n started = True\n hledani_cesty()\n clock.schedule_interval(pohyb, 0.1)\n escaped = False\n if not escaped:\n if key == 119 or key == keys.UP:\n if can_move(hlava_hada[0] - 1, hlava_hada[1], \"smer\"):\n smer[0] = 0\n smer[1] = -1\n if key == 115 or key == keys.DOWN:\n if can_move(hlava_hada[0] + 1, hlava_hada[1], \"smer\"):\n smer[0] = 0\n smer[1] = 1\n if key == 100 or key == keys.RIGHT:\n if can_move(hlava_hada[0], hlava_hada[1] + 1, \"smer\"):\n smer[0] = 1\n smer[1] = 1\n if key == 97 or key == keys.LEFT:\n if can_move(hlava_hada[0], hlava_hada[1] - 1, \"smer\"):\n smer[0] = 1\n smer[1] = -1\n if key == keys.ESCAPE:\n escaped = True\n clock.unschedule(pohyb)\n\n\ndef update():\n draw()\n\ndef draw():\n global aktualnicas\n screen.clear()\n if not konec:\n for y, radek in enumerate(pole):\n for x, had in enumerate(radek):\n if had[0] == 0:\n r = Rect((x * pocet_pixelu, y * pocet_pixelu), (pocet_pixelu, pocet_pixelu))\n screen.draw.filled_rect(r, (0x00, 0x00, 0x00))\n if had[0] == 1:\n r = Rect((x * pocet_pixelu, y * pocet_pixelu), (pocet_pixelu, pocet_pixelu))\n screen.draw.filled_rect(r, (0xff, 0xff, 0x00))\n if had[0] == 2:\n r = Rect((x * pocet_pixelu, y * pocet_pixelu), 
(pocet_pixelu, pocet_pixelu))\n screen.draw.filled_rect(r, (0xfe, 0x01, 0x9a))\n if had[0] == 3:\n r = Rect((x * pocet_pixelu, y * pocet_pixelu), (pocet_pixelu, pocet_pixelu))\n screen.draw.filled_rect(r, (0x00, 0x00, 0xff))\n screen.draw.line((0, vyska * pocet_pixelu), ((sirka + 1) * pocet_pixelu, vyska * pocet_pixelu), (0xff, 0xff, 0xff))\n if escaped:\n screen.draw.text(\"PRESS SPACE\", ((sirka / 2 - 7) * pocet_pixelu, (vyska / 2 - 2) * pocet_pixelu), color=\"white\", fontsize=30/10 * pocet_pixelu)\n if not escaped:\n if started or not escaped:\n aktualnicas = time.time() - casovac\n cashms = time.strftime(\"%H:%M:%S\", time.gmtime(aktualnicas))\n milisec = (aktualnicas - math.floor(aktualnicas)) * 1000\n milisec = math.floor(milisec)\n screen.draw.text((\"time: \" + cashms + \":\" + str(milisec)), (1 * pocet_pixelu, vyska * pocet_pixelu),\n color=\"green\", fontsize=pocet_pixelu * (24 / 10))\n if escaped:\n screen.draw.text(\"time: 00:00:00:000\", (1 * pocet_pixelu, vyska * pocet_pixelu),\n color=\"green\", fontsize=pocet_pixelu * (24 / 10))\npgzrun.go()","repo_name":"Panda00098/Snake","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"934240173","text":"\"\"\"\n\nmiqp-clf2lin\n------------\n\nDefinition of different feature subsets to be used in learning experiments.\nA dictionary is created with different lists of features, and finally saved.\nFor a complete list of available features, refer to doc/processed_features.md\n\nRun as\n\n python feature_subsets.py\n\n\"\"\"\n\nimport argparse\nimport os\nimport pickle\n\n\nif __name__ == \"__main__\":\n\n # Parser definition\n parser = argparse.ArgumentParser(description='Arg parser for feature subsets definition.')\n\n parser.add_argument(\n '--save_dict_path',\n type=str,\n default='../data',\n help='Location where dictionary will be saved.'\n )\n\n args = parser.parse_args()\n\n fts_dict = {\n 'general_fts': [\n 'Name', 'Origin', 'Curvature', 'BinLabel', 'MultiLabel', 'Weight', 'Time_L', 'Time_NL'\n ],\n\n 'Init60': [\n # general\n 'RSizes', 'RBin', 'RCont_RInt',\n # quadratic objective\n 'RNnzDiagBin', 'RNnzDiagCont_RNnzDiagInt',\n 'DiagDensity', 'OutDiagDensity', 'QDensity', 'RBinBin', 'RContCont_RIntInt',\n 'RMixedBin', 'RMixedCont_RMixedInt',\n 'RNonLinTerms', 'RNonLinTermsNnz', 'RelVarsLinInc', 'RelConssLinInc', 'RLinSizes',\n 'NormMaxDegBin', 'NormMaxDegCont_NormMaxDegInt',\n 'AvgDiagDom', 'RDiagCoeff', 'ROutDiagCoeff',\n # linear objective\n 'RNnzBinLin', 'RNnzContLin_RNnzIntLin',\n 'HasLinearTerm', 'LinDensity', 'RLinCoeff',\n # constraints\n 'ConssDensity', 'RConssBin', 'RConssCont', 'RConssInt', 'RConssCoeff', 'RRhsCoeff',\n # spectrum\n 'RQTrace', 'QSpecNorm',\n 'RQRankEig', 'HardEigenPerc',\n 'AvgSpecWidth',\n 'RPosEigen', 'RNegEigen', 'RZeroEigen',\n 'RAbsEigen', 'RNZeroEigenDiff', 'HardEigenPercDiff',\n # preprocessing\n 'prep_RelVarsIncL', 'prep_RelVarsIncNL', 'prep_RelConssIncL', 'prep_RelConssIncNL',\n 'prep_RSizesL', 'prep_RSizesNL',\n 'prep_ConssDensityL', 'prep_ConssDensityNL', 'prep_ConssDensityDiff',\n 'prep_RelConssDensityL', 'prep_RelConssDensityNL',\n # root node\n 'root_RtTimeDiff', 'root_RLPTimeDiff',\n 'root_SignRDBDiff', 'root_RelRDBDiff', 'root_RelSignRDBDiff',\n ],\n\n 'Selected': [\n # general\n 'RBin', 'RCont_RInt',\n # quadratic objective\n 'RNnzDiagCont_RNnzDiagInt', 'OutDiagDensity', 'QDensity', 'RBinBin', 'RContCont_RIntInt',\n 'RNonLinTerms', 
'RelVarsLinInc', 'RLinSizes', 'NormMaxDegBin', 'NormMaxDegCont_NormMaxDegInt',\n            # linear objective\n            'RNnzContLin_RNnzIntLin',\n            # constraints\n            'ConssDensity', 'RConssInt',\n            # spectrum\n            'RQRankEig', 'HardEigenPerc',\n            # preprocessing\n            'prep_RelVarsIncL', 'prep_RelConssIncL', 'prep_RSizesL', 'prep_ConssDensityL'\n        ],\n\n    }\n\n    pickle.dump(fts_dict, open(os.path.join(args.save_dict_path, 'fts_subsets.pkl'), 'wb'))\n    print(\"Dictionary of feature subsets saved at \\n{}\".format(os.path.join(args.save_dict_path, 'fts_subsets.pkl')))\n    print(\"Keys:\\n{}\".format(fts_dict.keys()))\n","repo_name":"ds4dm/miqp-clf2lin","sub_path":"src/feature_subsets.py","file_name":"feature_subsets.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"72623404853","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, Flatten\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.datasets import cifar10\nimport keras\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nfrom keras.utils import plot_model\n\ninput_shape = (32,32,3)\n\ndef Build(model):\n    # first block: conv and pooling layers\n    model.add(Conv2D(32, (3, 3), input_shape=input_shape, padding='same',activation='relu',kernel_initializer='he_normal'))\n    model.add(Conv2D(32, (3, 3), activation='relu', padding='same',kernel_initializer='he_normal'))\n    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    # second block: conv and pooling layers\n    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',kernel_initializer='he_normal'))\n    model.add(Conv2D(64, (3, 3), activation='relu', padding='same',kernel_initializer='he_normal'))\n    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    # model.add(Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer='he_normal'))\n    # model.add(Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer='he_normal'))\n    # model.add(Conv2D(128, (3, 3), activation='relu', padding='same',kernel_initializer='he_normal'))\n    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Flatten())\n    model.add(Dense(1024, activation='relu',kernel_initializer='he_normal'))\n    model.add(Dense(10, activation='softmax'))\n    return model\n\n# def read_data():\n#     return 1\n\ndef Train(model):\n    # data = read_data()\n    (train_feature,train_label),(test_feature,test_label) = cifar10.load_data()\n    train_feature = train_feature/255\n    test_feature = test_feature/255\n    print(train_feature.shape)\n    train_label = keras.utils.to_categorical(train_label, 10)\n    test_label = keras.utils.to_categorical(test_label, 10)\n    print(len(train_feature))\n    model.compile(loss='categorical_crossentropy',optimizer='adam') \n    model.fit(train_feature,train_label, batch_size=200, epochs=1, verbose=1)\n    result = model.predict(test_feature)\n    # compare predicted and true class indices; comparing raw softmax outputs\n    # to one-hot labels element-wise would (almost) never match\n    result_bool = np.equal(np.argmax(result, axis=1), np.argmax(test_label, axis=1))\n    true_num = np.sum(result_bool)\n    print(\"\")\n    print(\"The accuracy of the model is %f\" % (true_num/len(result_bool)))\n\n\n\n\ndef main():\n    model = Sequential()\n    model = Build(model)\n    Train(model)\n    model.save('my_model.h5')\n    plot_model(model, to_file='model.png')\n\nif __name__ == '__main__':\n    main()","repo_name":"henry3556108/tensorflow","sub_path":"GAN_Keras/CNN手寫數字/CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
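# Loading the pickled dictionary back, mirroring the pickle.dump call in
# feature_subsets.py above (the path assumes the default --save_dict_path and
# that the script has already been run):
import os
import pickle
with open(os.path.join('../data', 'fts_subsets.pkl'), 'rb') as f:
    fts_dict = pickle.load(f)
print(fts_dict['Selected'][:5])  # first few names from the 'Selected' subset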
+{"seq_id":"39625776992","text":"from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.flatpages.models import FlatPage\nfrom django.contrib.sites.models import Site\nfrom django.core.cache import cache\nfrom django.test import Client, TestCase, modify_settings\nfrom django.urls import reverse\n\nfrom posts.models import Comment, Follow, Group, Post, User\n\n\nclass TaskPagesTests(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        # create user testa\n        cls.user = User.objects.create(\n            username='testa',\n            email='testa@gmail.com',\n            password='12345678',\n        )\n        # create group\n        cls.group = Group.objects.create(\n            title='testtitle',\n            slug='testtitle',\n        )\n        # create a post record in the DB\n        Post.objects.create(\n            text='ТестаПост',\n            author=TaskPagesTests.user,\n            group=TaskPagesTests.group,\n        )\n\n    def setUp(self):\n        # cache.clear()\n        # create authorised client\n        User = get_user_model()\n        self.user1 = User.objects.create_user(\n            username='testuser',\n            email='testuser@gmail.com',\n            password='12345678',\n        )\n        self.authorized_client = Client()\n        self.guest_client = Client()\n        self.author_client = Client()\n        # use this user to authorize and make posts\n        self.authorized_client.force_login(self.user1)\n        self.author_client.force_login(TaskPagesTests.user)\n        self.testpost = Post.objects.get(text='ТестаПост')\n        # create site for flatpages\n        site1 = Site(pk=1, domain='localhost:8000', name='localhost:8000')\n        site1.save()\n        FlatPage.objects.create(\n            url='/about-author/',\n            title='О авторе',\n            content='немного о себе',\n        ).sites.add(site1)\n        FlatPage.objects.create(\n            url='/about-spec/',\n            title='О технологии',\n            content='немного о технологии',\n        ).sites.add(site1)\n        self.static_pages = ('/about-author/', '/about-spec/')\n\n    def test_pages_uses_correct_template(self):\n        \"\"\"The URL uses the corresponding template.\"\"\"\n        # mapping \"name_of_html_template: url_reverse_name\"\n        templates_pages_names = {\n            'index.html': reverse('index'),\n            'new.html': reverse('new_post'),\n            'group.html': (\n                reverse('group_posts', kwargs={'slug': 'testtitle'})),\n        }\n        for template, reverse_name in templates_pages_names.items():\n            with self.subTest(reverse_name=reverse_name):\n                response = self.authorized_client.get(reverse_name)\n                self.assertTemplateUsed(response, template)\n\n    def test_home_page_show_correct_context(self):\n        \"\"\"The index template is rendered with the correct context.\"\"\"\n        response = self.authorized_client.get(reverse('index'))\n        # get fields in context\n        field_text = response.context.get('posts')[0].text\n        field_pub_date = response.context.get('posts')[0].pub_date\n        field_author = response.context.get('posts')[0].author.username\n        field_group = response.context.get('posts')[0].group.title\n        pubdate = self.testpost.pub_date\n        # check the fields for equality\n        self.assertEqual(field_text, 'ТестаПост')\n        self.assertEqual(field_author, 'testa')\n        self.assertEqual(field_pub_date, pubdate)\n        self.assertEqual(field_group, 'testtitle')\n\n    def test_home_page_show_correct_paginator(self):\n        \"\"\"The index template receives the correct number of posts per page.\"\"\"\n        response = self.authorized_client.get(reverse('index'))\n        # get fields in context\n        field_pages = response.context.get('page')\n        # check the fields for equality\n        self.assertEqual(len(field_pages), 1)\n\n    def test_group_page_show_correct_context(self):\n        \"\"\"The group template is rendered with the correct context.\"\"\"\n        response = self.authorized_client.get(\n            reverse('group_posts', kwargs={'slug': 
'testtitle'})\n        )\n        # get fields in context\n        field_text = response.context.get('posts')[0].text\n        field_pub_date = response.context.get('posts')[0].pub_date\n        field_author = response.context.get('posts')[0].author.username\n        field_group = response.context.get('posts')[0].group.title\n        pubdate = self.testpost.pub_date\n        # check the fields for equality\n        self.assertEqual(field_text, 'ТестаПост')\n        self.assertEqual(field_author, 'testa')\n        self.assertEqual(field_pub_date, pubdate)\n        self.assertEqual(field_group, 'testtitle')\n\n    def test_profile_page_show_correct_context(self):\n        \"\"\"The profile template is rendered with the correct context.\"\"\"\n        response = self.authorized_client.get(\n            reverse('profile', kwargs={'username': 'testa'})\n        )\n        # get fields in context\n        field_text = response.context.get('post')[0].text\n        field_pub_date = response.context.get('post')[0].pub_date\n        field_author = response.context.get('post')[0].author.username\n        pubdate = self.testpost.pub_date\n        # check the fields for equality\n        self.assertEqual(field_text, 'ТестаПост')\n        self.assertEqual(field_author, 'testa')\n        self.assertEqual(field_pub_date, pubdate)\n\n    def test_author_post_page_show_correct_context(self):\n        \"\"\"The post template is rendered with the correct context.\"\"\"\n        postid = self.testpost.id\n        response = self.authorized_client.get(\n            reverse('post', kwargs={'username': 'testa', 'post_id': postid})\n        )\n        # get fields in context\n        field_text = response.context.get('post').text\n        field_author = response.context.get('post_author').username\n        field_post_id = response.context.get('post_id')\n        # check the fields for equality\n        self.assertEqual(field_text, 'ТестаПост')\n        self.assertEqual(field_author, 'testa')\n        self.assertEqual(field_post_id, postid)\n\n    # check the context of the new-post page (it contains a form)\n    def test_new_page_show_correct_context(self):\n        \"\"\"The new template is rendered with the correct context.\"\"\"\n        response = self.authorized_client.get(reverse('new_post'))\n        # list the expected classes of the form fields:\n        # i.e. which classes the form fields should be instances of\n        form_fields = {\n            # When the form is created, model fields of type TextField\n            # are converted to CharField with a forms.Textarea widget\n            'text': forms.fields.CharField,\n            'group': forms.fields.ChoiceField,\n        }\n        # check that the form field types are present in the context\n        for value, expected in form_fields.items():\n            with self.subTest(value=value):\n                form_field = response.context.get('form').fields.get(value)\n                # check that the form field is an instance of the expected class\n                self.assertIsInstance(form_field, expected)\n\n    def test_edit_page_show_correct_context(self):\n        \"\"\"The new (edit) template is rendered with the correct context.\"\"\"\n        postid = self.testpost.id\n        response = self.author_client.get(\n            reverse('post_edit', kwargs={\n                'username': 'testa', 'post_id': postid})\n        )\n        form_fields = {\n            # When the form is created, model fields of type TextField\n            # are converted to CharField with a forms.Textarea widget\n            'text': forms.fields.CharField,\n            'group': forms.fields.ChoiceField,\n        }\n        for value, expected in form_fields.items():\n            with self.subTest(value=value):\n                form_field = response.context.get('form').fields.get(value)\n                # check that the form field is an instance of the expected class\n                self.assertIsInstance(form_field, expected)\n\n    def test_index_page_show_correct_context(self):\n        \"\"\"The index template is rendered with the correct context.\"\"\"\n        response = self.authorized_client.get(reverse('index'))\n        post_text_0 = response.context.get('posts')[0].text\n        post_group_0 = 
response.context.get('posts')[0].group.title\n        self.assertEqual(post_text_0, 'ТестаПост')\n        self.assertEqual(post_group_0, 'testtitle')\n\n    def test_group_posts_pages_show_correct_context(self):\n        \"\"\"The group.html template is rendered with the correct context.\"\"\"\n        response = self.authorized_client.get(\n            reverse('group_posts', kwargs={'slug': 'testtitle'})\n        )\n        group_post_0 = response.context.get('posts')[0].text\n        group_group_0 = response.context.get('posts')[0].group.title\n        self.assertEqual(group_post_0, 'ТестаПост')\n        self.assertEqual(group_group_0, str(TaskPagesTests.group))\n\n    def test_about_author_flatpage(self):\n        \"\"\"Check that the 'about the author' page is reachable.\"\"\"\n        response = self.guest_client.get(reverse('about-author'))\n        self.assertEqual(response.status_code, 200, f'{response}')\n\n    def test_about_spec_flatpage(self):\n        \"\"\"Check that the 'about the technology' page is reachable.\"\"\"\n        response = self.guest_client.get(reverse('about-spec'))\n        self.assertEqual(response.status_code, 200, f'{response}')\n\n    def test_home_page_show_correct_context_with_cache(self):\n        \"\"\"The rendered content changes correctly when working with the cache.\"\"\"\n        # get fields in context\n        client = self.authorized_client\n        response = client.get(reverse('index'))\n        content = response.content\n        # check the fields for equality\n        self.assertEqual(response.content, content)\n        # deleting all\n        Post.objects.all().delete()\n        # check the fields for equality (cache is working)\n        response = client.get(reverse('index'))\n        self.assertEqual(response.content, content)\n        # cache.clear\n        cache.clear()\n        # check the fields for inequality (cache is cleared)\n        response = client.get(reverse('index'))\n        self.assertNotEqual(response.content, content)\n\n    def test_follow_author(self):\n        \"\"\"Check that an authorized user can follow an author.\"\"\"\n        # create new following\n        self.authorized_client.get(reverse(\n            'profile_follow', kwargs={'username': 'testa'})\n        )\n        # check for the new row in the Follow model\n        following_author = Follow.objects.filter(\n            author__username='testa').exists()\n        self.assertTrue(following_author)\n\n    def test_unfollow_author(self):\n        \"\"\"Check that an authorized user can unfollow an author.\"\"\"\n        # create new following\n        self.authorized_client.get(reverse(\n            'profile_follow', kwargs={'username': 'testa'})\n        )\n        # check for the new row in the Follow model\n        following_author = Follow.objects.filter(\n            author__username='testa').exists()\n        self.assertTrue(following_author)\n        # delete following\n        self.authorized_client.get(reverse(\n            'profile_unfollow', kwargs={'username': 'testa'}))\n        # check the result\n        unfollowing_author = Follow.objects.filter(\n            author__username='testa').exists()\n        self.assertFalse(unfollowing_author)\n\n    def test_new_post_in_follower_list(self):\n        \"\"\"After following, the author's posts appear in the feed.\"\"\"\n        # request follow_index\n        response = self.authorized_client.get(reverse('follow_index'))\n        content = response.content\n        # create new follow\n        self.authorized_client.get(reverse(\n            'profile_follow', kwargs={'username': 'testa'})\n        )\n        # check for update in content\n        cache.clear()\n        response = self.authorized_client.get(reverse('follow_index'))\n        self.assertNotEqual(response.content, content,\n                            'Rendering error: the content should have changed')\n        # check for the presence of the author's post\n        text_upd_post = response.context.get('page')[0].text\n        self.assertEqual(text_upd_post, 'ТестаПост',\n                         f'Rendering error: {text_upd_post} should appear')\n\n    def test_make_comments_authorized(self):\n        
\"\"\"Only an authorized user can comment on posts.\"\"\"\n        # record the current comment count\n        comments_count = Comment.objects.count()\n        form_data = {\n            'text': 'test_text',\n        }\n        # post response from guest\n        self.guest_client.post(reverse('add_comment', kwargs={\n            'username': self.testpost.author.username, 'post_id': self.testpost.id\n        }), data=form_data, follow=True, )\n        self.assertEqual(Comment.objects.count(), comments_count)\n        # post from authorized user\n        self.authorized_client.post(reverse('add_comment', kwargs={\n            'username': self.testpost.author.username, 'post_id': self.testpost.id\n        }), data=form_data, follow=True,)\n        # comments_count2 = Comment.objects.count()\n        self.assertEqual(Comment.objects.count(), comments_count + 1)\n        # note the parentheses: without the call, `exists` would be a bound\n        # method and therefore always truthy\n        post = Comment.objects.filter(text='test_text').exists()\n        self.assertTrue(post)\n","repo_name":"toshiharu13/Yatube_final","sub_path":"posts/tests/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":14189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39338614196","text":"from Spells.Spell import Spell, CurrentStates\nfrom BasicControls import BasicControls\nfrom SearchForEnemy.MoveToEnemy import MoveToEnemy\n\n\nclass SpellAttack(Spell):\n    spellName = \"SpellAttack\"\n\n    spellBar = 2\n    spellSlot = 1\n\n    priority = 5\n\n    def __init__(self, priority=5):\n        Spell.__init__(self)\n\n        self.priority = priority\n\n    def shouldUseThisSpell(self, states: CurrentStates):\n        Spell.currentStates = states\n        if states.isTargetingEnemy and states.haveEnoughMagicka:\n            return True\n        else:\n            return False\n\n    def use(self):\n        from _Configuration import useAutoWalk\n        if useAutoWalk:\n            MoveToEnemy.getInstance().moveToEnemyLong()\n        BasicControls.getInstance().normalAttack()\n\n        if self.currentStates.sp1:\n            self.spellSlot = 3\n        else:\n            self.spellSlot = 1\n        super().use()\n\n\nif __name__ == \"__main__\":\n    attack: Spell = SpellAttack()\n    state: CurrentStates = CurrentStates()\n\n    print(attack.shouldUseThisSpell(state))\n\n    state2: CurrentStates = CurrentStates()\n\n    state2.haveDamageShield = True\n    state2.isTargetingEnemy = True\n    state2.haveEnoughHealth = True\n    state2.haveEnoughMagicka = True\n\n    print(attack.shouldUseThisSpell(state2))\n\n    attack.use()\n\n\n\n","repo_name":"Sibiryak82/ESOB","sub_path":"ESOAutoPlay/Spells/SpellAttack.py","file_name":"SpellAttack.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20199118937","text":"import serial\nimport time\nimport requests\n\n# Set up serial port\nser = serial.Serial('COM5', 230400, timeout=0)\n\n# Urls\nurl_location = \"http://52.45.17.177:802/XpertRestApi/api/location_data\"\nurl_alert = \"http://52.45.17.177:802/XpertRestApi/api/alert_data\"\n\n# Times\npostInterval = 5\nlastPostTime = time.time()\n\nprint(\"Starting packet reading at: \" + time.ctime(lastPostTime))\n\n# Print the first element of serial-read data if the array isn't empty\nwhile True:\n    if ser.in_waiting:\n        packetTime = time.time()\n        packet = ser.readline()\n        if (time.time() > lastPostTime + postInterval):\n            lastPostTime = packetTime\n            print(\"\\n\"+ packet.decode('utf'))\n            res = requests.post(\n                url_location,\n                json={\n                    \"deviceimei\": 111112222233333,\n                    \"altitude\": 1,\n                    \"latitude\": 38.443976,\n                    \"longitude\": -78.874720,\n                    \"devicetime\": 10,\n                    \"speed\": 0,\n                    \"Batterylevel\": \"85\",\n                    \"casefile_id\": \"string\",\n                    \"address\": \"string\",\n                    
\"positioningmode\": \"string\",\n \"tz\": \"string\",\n \"alert_type\": \"string\",\n \"alert_message\": \"string\",\n \"alert_id\": \"string\",\n \"offender_name\": \"string\",\n \"offender_id\": \"string\"\n }\n )\n print(res.json())\n\nser.close()\nser.is_open","repo_name":"MatthewKLewis/IOxSerial","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36099768025","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--in_path', '--in', default=None,type=str, help=\"labeln_dir\")\nparser.add_argument('--out_path', '--out', default=None,type=str, help=\"labeln_dir\")\n\nargs = parser.parse_args()\n\nf1 = open(args.in_path,'r')\nf2 = open(args.out_path,'w')\n\nfor line in f1:\n line = line.replace('\\n','')\n s = line.split(' ')\n new_line = s[0] #image path\n\n boxes = s[1:]\n for box in boxes:\n new_box = box[0:-1]+'0'\n new_line += (' '+new_box)\n \n # print(new_line)\n f2.write(new_line+'\\n')","repo_name":"chiahuilin0531/yolov4-tflite","sub_path":"data/dataset/3cls-to-1cls.py","file_name":"3cls-to-1cls.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20722487940","text":"import os\nimport time\nimport urllib.request\nfrom datetime import datetime\nfrom urllib.error import HTTPError\n\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import quote\n\nfrom spreadsheet_update import update_cell, grab_column\n\nsr_column = 'Roster!F'\ncareer_high_column = 'Roster!G'\nlast_updated_column = 'Roster!H'\nnotes_column = 'Roster!I'\n\nSPREADSHEET_ID = os.environ.get('SHEET_ID')\n\n\nclass OverbuffScraper:\n\n @staticmethod\n def sr_fetch(player):\n player_url = player.replace(\"#\", \"-\")\n player_page = 'https://www.overbuff.com/players/pc/' + quote(player_url)\n req = urllib.request.Request(player_page)\n req.add_header(\"User-agent\", \"overbuff-scraper 0.1\")\n page = urllib.request.urlopen(req)\n page_contents = BeautifulSoup(page, 'html.parser')\n sr_span = page_contents.find('span', attrs={'class': 'player-skill-rating'})\n ispublic = True\n if page_contents.find('i', attrs={'class': 'fa-lock'}) is not None:\n ispublic = False\n return sr_span.text.strip(), ispublic\n\n @staticmethod\n def update_srs(playertag, row, date):\n \"\"\"Tries to get the sr from Overbuff and logs the SR.\"\"\"\n try:\n player_sr, ispublic = OverbuffScraper.sr_fetch(playertag)\n # print(\"{}'s SR: {}\".format(player_tag, player_sr))\n\n # Catches incorrect battletags\n except HTTPError:\n print(\"Error retrieving {}'s page. Debug/update battletag\".format(playertag))\n body = {\n 'values': [[\"\"\"Battletag incorrect? 
(Case-sensitive) - If you see a whole column of these, \"\"\"\n \"\"\"PM swallama NOW!!\"\"\"]]\n }\n update_cell(SPREADSHEET_ID, (notes_column + str(row)), body)\n\n # except UnicodeEncodeError:\n # \"\"\"Handles situations with odd-ball battletags\"\"\"\n # # print(\"{}'s SR: {}\".format(player_tag.encode('utf-8').strip(), player_sr))\n # body = {\n # 'values': [[player_sr]]\n # }\n # update_cell(SPREADSHEET_ID, ('Roster!F' + str(row)), body)\n # update_cell(SPREADSHEET_ID, ('Roster!H' + str(row)), {'values': [[\"\"]]})\n\n # Handles a un-reported/unranked player\n except AttributeError:\n print(\"{} has no SR reported\".format(playertag))\n body = {\n 'values': [[r\"**Not current**\"]]\n }\n update_cell(SPREADSHEET_ID, (notes_column + str(row)), body)\n # If everything is good to go, updates the spreadsheet\n else:\n body = {\n 'values': [[player_sr]]\n }\n update_cell(SPREADSHEET_ID, (sr_column + str(row)), body)\n if ispublic:\n update_cell(SPREADSHEET_ID, (notes_column + str(row)), {'values': [[\"\"]]})\n update_cell(SPREADSHEET_ID, (last_updated_column + str(row)), {'values': [[date]]})\n else:\n update_cell(SPREADSHEET_ID, (notes_column + str(row)), {'values': [[\"Private profile\"]]})\n print(\"{} has a private profile!\".format(playertag))\n\n\nif __name__ == '__main__':\n current_time = datetime.now()\n print(\"Running scraper on \" + datetime.strftime(current_time, \"%b %d, %I:%M%p\"))\n battletags = grab_column(SPREADSHEET_ID, \"D\", sheetname=\"Roster\")\n for index, battletag in enumerate(battletags, start=1):\n \"\"\"Checks to make sure there's something in the cell. If not, moves on to the next one\"\"\"\n try:\n player_tag = battletag[0]\n except IndexError:\n continue\n\n if player_tag != 'Battletag':\n \"\"\"Skips past the column headers\"\"\"\n OverbuffScraper.update_srs(playertag=player_tag,\n row=index,\n date=datetime.strftime(current_time, \"%d/%m/%y\"))\n time.sleep(2)\n\n srs = grab_column(SPREADSHEET_ID, \"F\", sheetname=\"Roster\")\n career_highs = grab_column(SPREADSHEET_ID, \"G\", sheetname=\"Roster\")\n for index, battletag in enumerate(battletags, start=1):\n try:\n player_tag = battletag[0]\n except IndexError:\n continue\n\n if player_tag != 'Battletag':\n if len(career_highs[index-1]) == 0 or int(srs[index-1][0]) > int(career_highs[index-1][0]):\n update_cell(SPREADSHEET_ID, (career_high_column + str(index)), {'values': [[srs[index-1][0]]]})\n print(\"{} has a new career high!\".format(player_tag))\n\n print(\"Scraper successfully completed at \" + datetime.strftime(datetime.now(), \"%I:%M%p\"))\n print(\"Please see above for errors\")\n # OverbuffScraper.update_srs(\"MilkSteak#11366\",\n # 157,\n # datetime.strftime(current_time, \"%d/%m/%y\"))\n","repo_name":"shadd-anderson/google-spreadsheet-updater","sub_path":"overbuff_scraper.py","file_name":"overbuff_scraper.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18849981714","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 12:53:48 2018\n\n@author: Parvesh Joon\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 12:52:53 2018\n\n@author: Parvesh Joon\n\"\"\"\n\nfrom flask import Flask\nimport json\napp = Flask(__name__)\n\n\n@app.route(\"/getName/\")\ndef getName(state):\n with open(\"mocks/mocks3.json\") as input_file:\n people_state = json.load(input_file)\n flg = \"n\"\n for key,item in people_state.items():\n if key == state:\n flg = \"y\"\n return item \n if flg == 
\"n\":\n return \"State not Found\"\n# print(people_state)\n# return \"True\"\n\n@app.route(\"/getState/\")\ndef getState(name):\n with open(\"mocks/mocks3.json\") as input_file:\n people_state = json.load(input_file)\n flg = \"n\"\n for key,item in people_state.items():\n state = key\n name_list = [x.strip() for x in item.split(',')]\n for i in range(len(name_list)):\n if name_list[i] == name:\n flg = \"y\"\n return state\n if flg == \"n\":\n return \"Name not Found\"\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"parv1981/tasnix","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29778744337","text":"import os\nimport datetime\nimport json\nimport math\nimport logging\nimport dateutil.parser\nimport time\nimport traceback\nfrom celery import schedules as celery_schedules, current_app\nfrom celery.beat import Scheduler, ScheduleEntry\n\nlog = logging.getLogger(__name__)\n\nCELERYBEAT_MAX_LOOP_INTERVAL = 5 # seconds\nPERIODS = ('days', 'hours', 'minutes', 'seconds', 'microseconds')\nTOTAL_RUN_COUNT_KEY = 'total_run_count'\nLAST_RUN_AT_KEY = 'last_run_at'\nRUN_IMMEDIATELY_KEY = 'run_immediately'\n\n\ndef to_date(date_str):\n result = dateutil.parser.parse(date_str)\n return result\n\n\ndef get_schedules_filepath():\n root_path = os.path.abspath('')\n result = \"{}/proj/task_schedules.json\".format(root_path)\n return result\n\n\ndef get_schedules_status_filepath():\n root_path = os.path.abspath('')\n result = \"{}/proj/task_schedules_status.json\".format(root_path)\n return result\n\n\ndef load_schedules_from_file():\n filepath = get_schedules_filepath()\n with open(filepath, \"r\") as schedules_file:\n result = json.load(schedules_file)\n return result\n\n\ndef save_schedules_to_file(schedules):\n filepath = get_schedules_filepath()\n with open(filepath, 'w', encoding='utf-8') as f:\n json.dump(\n schedules, f, ensure_ascii=False, indent=4\n )\n\n\ndef load_schedules_status_from_file():\n filepath = get_schedules_status_filepath()\n if not os.path.exists(filepath):\n return {}\n with open(filepath, \"r\") as status_file:\n result = json.load(status_file)\n return result\n\n\ndef save_schedules_status_to_file(schedules_status):\n filepath = get_schedules_status_filepath()\n with open(filepath, 'w', encoding='utf-8') as f:\n json.dump(\n schedules_status, f, ensure_ascii=False, indent=4\n )\n\n\ndef convert_to_celery_schedule(task_schedule):\n if 'interval' in task_schedule and 'crontab' in task_schedule:\n raise Exception(\"Cannot define both interval and crontab schedule\")\n\n if 'interval' in task_schedule:\n interval = task_schedule['interval']\n if interval['period'] in PERIODS:\n result = (\n celery_schedules.schedule(\n datetime.timedelta(**{interval['period']: interval['every']})\n )\n )\n return result\n else:\n raise Exception(\n \"The value of an interval must be {}\".format(PERIODS)\n )\n elif 'crontab' in task_schedule:\n crontab = task_schedule['crontab']\n options = {}\n option_keys = (\n 'minute', 'hour', 'day_of_week', 'day_of_month', 'month_of_year'\n )\n for option_key in option_keys:\n value = crontab.get(option_key, None)\n if value is not None:\n options[option_key] = value\n result = celery_schedules.crontab(**options)\n return result\n else:\n raise Exception(\n \"You must define interval or crontab schedule - {}\".format(\n task_schedule\n )\n )\n\n\ndef get_task_options(task_schedule):\n option_keys = (\n 
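# these mirror per-task options that Celery's apply_async understands (routing and expiry)\n 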
'queue', 'exchange', 'routing_key', 'expires', 'soft_time_limit'\n )\n options = {}\n for option_key in option_keys:\n if option_key in task_schedule:\n options[option_key] = task_schedule[option_key]\n return options\n\n\nclass CustomScheduleEntry(ScheduleEntry):\n def __init__(self, task, options={}, schedule=None, name=None, app=None):\n if schedule is not None:\n # to support Celery default entries\n converted_task = {\n 'name': name,\n 'task': task,\n 'options': options,\n 'schedule': schedule\n }\n self._task = converted_task\n else:\n self._task = task\n schedule = task.get('schedule', None)\n\n self.app = (\n app if app is not None\n else current_app._get_current_object()\n )\n\n if all(k in self._task for k in ('name', 'task')):\n self.name = self._task['name']\n self.task = self._task['task']\n else:\n raise Exception(\"'name' and 'task' are required!\")\n\n self.args = self._task.get('args', [])\n self.kwargs = self._task.get('kwargs', {})\n self.options = (\n options if options is not None\n else get_task_options(self._task)\n )\n self.schedule = (\n schedule if schedule is not None\n else convert_to_celery_schedule(self._task)\n )\n\n if not TOTAL_RUN_COUNT_KEY in self._task:\n status_dict = load_schedules_status_from_file()\n schedule_status = status_dict.get(self.name, {})\n self._task.setdefault(\n TOTAL_RUN_COUNT_KEY,\n schedule_status.get(TOTAL_RUN_COUNT_KEY, 0)\n )\n\n app_now = self.default_now()\n self._task.setdefault(\n LAST_RUN_AT_KEY,\n schedule_status.get(LAST_RUN_AT_KEY, app_now.isoformat())\n )\n\n self.total_run_count = self._task[TOTAL_RUN_COUNT_KEY]\n self.last_run_at = to_date(self._task[LAST_RUN_AT_KEY])\n\n def next(self):\n self._task[LAST_RUN_AT_KEY] = self.default_now().isoformat()\n self._task[TOTAL_RUN_COUNT_KEY] += 1\n self._task[RUN_IMMEDIATELY_KEY] = False\n return self.__class__(self._task)\n\n __next__ = next\n\n def is_due(self):\n start_after = self._task.get('start_after', None)\n if start_after is not None:\n now = self.default_now()\n start_time = to_date(start_after)\n if now < start_time:\n delay = math.ceil(\n (start_time - now).total_seconds()\n )\n return celery_schedules.schedstate(False, delay)\n max_run_count = self._task.get('max_run_count', -1)\n if max_run_count > 0 and self.total_run_count >= max_run_count:\n return celery_schedules.schedstate(False, None)\n run_immediately = self._task.get(RUN_IMMEDIATELY_KEY, False)\n if run_immediately:\n result = self.schedule.is_due(self.last_run_at)\n return celery_schedules.schedstate(True, result.next)\n result = self.schedule.is_due(self.last_run_at)\n return result\n\n def __repr__(self):\n return (u'<{0} ({1} {2}(*{3}, **{4}) {5})>'.format(\n self.__class__.__name__,\n self.name, self.task, self.args,\n self.kwargs, self.schedule\n ))\n\n\nclass CustomScheduler(Scheduler):\n Entry = CustomScheduleEntry\n\n def __init__(self, *args, **kwargs):\n self._schedule = {}\n self._schedule_file = get_schedules_filepath()\n self._last_file_timestamp = os.path.getmtime(self._schedule_file)\n\n Scheduler.__init__(self, *args, **kwargs)\n self.max_interval = (\n kwargs.get('max_interval')\n or self.app.conf.beat_max_loop_interval\n or CELERYBEAT_MAX_LOOP_INTERVAL\n )\n\n def setup_schedule(self):\n self.sync()\n schedule_settings = load_schedules_from_file()\n self._schedule = {}\n for schedule in schedule_settings:\n if schedule['enabled']:\n self._schedule[schedule['name']] = self.Entry(schedule)\n self.install_default_entries(self._schedule)\n log.info(\n 'Current schedule:\\n' + '\\n'.join(\n 
repr(entry) for entry in self._schedule.values()\n )\n )\n\n def requires_update(self):\n ftimestamp = os.path.getmtime(get_schedules_filepath())\n if (ftimestamp > self._last_file_timestamp):\n self._last_file_timestamp = ftimestamp\n return True\n return False\n\n def get_schedule(self):\n if self.requires_update():\n self.setup_schedule()\n return self._schedule\n\n def set_schedule(self, schedule):\n self._schedule = schedule\n \n schedule = property(get_schedule, set_schedule)\n\n @property\n def sync_every(self):\n return self.app.conf.beat_sync_every or 1\n\n @property\n def info(self):\n return 'JSON schedule file -> {self._schedule_file}'.format(self=self)\n\n def sync(self):\n if len(self._schedule.values()) == 0:\n return\n status_dict = load_schedules_status_from_file()\n try:\n for entry in self._schedule.values():\n status = {}\n schedule_name = entry.name\n status[TOTAL_RUN_COUNT_KEY] = entry.total_run_count\n status[LAST_RUN_AT_KEY] = entry.last_run_at.isoformat()\n status_dict[schedule_name] = status\n save_schedules_status_to_file(status_dict)\n except Exception:\n log.error(traceback.format_exc())\n\n\nif __name__ == \"__main__\":\n test = CustomScheduler(app=current_app)\n pass\n","repo_name":"richardgyq/Celery-JSON-scheduler","sub_path":"proj/custom_scheduler.py","file_name":"custom_scheduler.py","file_ext":"py","file_size_in_byte":8940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73894083253","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/5/13 20:17\n# @Author : Wu Tianyu\n# Edge detection\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\n\n\n# Edge detection with Canny\ndef canny_test():\n img = cv2.imread('img/chouyou.jpg', 0)\n edges = cv2.Canny(img, 100, 200)\n plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('Origin')\n plt.xticks([]), plt.yticks([])\n plt.subplot(122), plt.imshow(edges, cmap='gray'), plt.title('Canny')\n plt.xticks([]), plt.yticks([])\n plt.show()\n\n\nif __name__ == '__main__':\n canny_test()\n","repo_name":"CynicalHeart/PyLearn","sub_path":"CV_Learn/Imgp_Learn07.py","file_name":"Imgp_Learn07.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6504264463","text":"print(\"Este programa calcula o valor ideal de calorias para homens e mulheres\")\r\n\r\nsexo = input(\"Digite qual é o seu sexo: \")\r\npeso = float(input(\"Digite o seu peso (kg): \"))\r\naltura = float(input(\"Digite a sua altura (cm): \"))\r\nidade = int(input(\"Digite a sua idade: \"))\r\n\r\nif sexo.lower() == \"masculino\":\r\n homem = 66 + (13.7 * peso) + (5 * altura) - (6.8 * idade)\r\n \r\n print(\"A quantidade de calorias diarias recomendavel para você é de: \" +str(homem) +\" calorias\")\r\n\r\nelif sexo.lower() == \"feminino\":\r\n mulher = 665 + (9.6 * peso) + (1.8 * altura) - (4.7 * idade)\r\n \r\n print(\"A quantidade de calorias diarias recomendavel para você é de: \" +str(mulher) +\" calorias\")","repo_name":"zinh9/Lista-de-Exercicio-2","sub_path":"Exercicio 9.py","file_name":"Exercicio 9.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1328865861","text":"import matplotlib.pyplot as plt\nimport coord.coord_triangle as tri\n\n\nif __name__ == \"__main__\":\n a = float(input('Enter x for a = (x, y): ')), float(input('Enter y for a = (x, y): '))\n b = float(input('Enter x for b = (x, y): ')), 
float(input('Enter y for b = (x, y): '))\n c = float(input('Enter x for c = (x, y): ')), float(input('Enter y for c = (x, y): '))\n\n triangle0 = tri.CoordinateTriangle(a, b, c)\n\n plt.plot([triangle0.v0[0], triangle0.v1[0]], [triangle0.v0[1], triangle0.v1[1]])\n plt.plot([triangle0.v1[0], triangle0.v2[0]], [triangle0.v1[1], triangle0.v2[1]])\n plt.plot([triangle0.v2[0], triangle0.v0[0]], [triangle0.v2[1], triangle0.v0[1]])\n\n plt.scatter([triangle0.v0[0], triangle0.v1[0], triangle0.v2[0]],\n [triangle0.v0[1], triangle0.v1[1], triangle0.v2[1]])\n plt.annotate('A ' + str(triangle0.v0), [triangle0.v0[0], triangle0.v0[1]])\n plt.annotate('B ' + str(triangle0.v1), [triangle0.v1[0], triangle0.v1[1]])\n plt.annotate('C ' + str(triangle0.v2), [triangle0.v2[0], triangle0.v2[1]])\n\n centroid = triangle0.centroid()\n circumcenter = triangle0.circumcenter()\n orthocenter = triangle0.orthocenter()\n incenter = triangle0.incenter()\n\n plt.scatter(centroid[0], centroid[1])\n plt.scatter(circumcenter[0], circumcenter[1])\n plt.scatter(orthocenter[0], orthocenter[1])\n plt.scatter(incenter[0], incenter[1])\n\n plt.annotate('Centroid ' + str(centroid), [centroid[0], centroid[1]])\n plt.annotate('Circumcenter ' + str(circumcenter), [circumcenter[0], circumcenter[1]])\n plt.annotate('Orthocenter ' + str(orthocenter), [orthocenter[0], orthocenter[1]])\n plt.annotate('Incenter ' + str(incenter), [incenter[0], incenter[1]])\n\n plt.show()","repo_name":"cmims/triangular","sub_path":"coord/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34011066421","text":"# RaspEasyClientGraph.py\n\nfrom tcpcom import TCPClient\nfrom gpanel import *\nimport time\n\ndef onStateChanged(state, msg):\n global t\n if state == \"CONNECTING\":\n title(\"Waiting for connection...\")\n elif state == \"CONNECTED\":\n title(\"Connection established.\")\n elif state == \"MESSAGE\":\n data = int(msg)\n if t == 0:\n move(t, data)\n else:\n draw(t, data)\n t += 0.1\n if t > 10:\n time.sleep(100)\n clear()\n drawGrid(0, 10, 0, 1000)\n\ndef onExit():\n client.disconnect()\n dispose()\n \nport = 5000 # IP port\nhost = \"192.168.1.106\"\nmakeGPanel(-1, 11, -100, 1100)\ndrawGrid(0, 10, 0, 1000, \"gray\")\naddExitListener(onExit)\nt = 0\nclient = TCPClient(host, port, stateChanged = onStateChanged)\nrc = client.connect()\nif not rc:\n title(\"Connection failed\")","repo_name":"raspibrick/install","sub_path":"rpi-tutorial/RaspEasyClientGraph.py","file_name":"RaspEasyClientGraph.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4629051927","text":"import tkinter as tk\nfrom tkinter import messagebox\nimport random\n\n\nclass SudokuApp:\n def __init__(self, root):\n self.root = root\n self.root.title(\"Sudoku App\")\n self.solution = None # Add this attribute to store the solution\n self.solution_backup = None # so that Solve/Verify before Generate does not crash\n self.create_ui()\n\n def create_ui(self):\n self.cells = []\n self.solution = [[0] * 9 for _ in range(9)]\n\n for i in range(9):\n row = []\n for j in range(9):\n cell = tk.Entry(\n self.root, width=2, font=(\"Arial\", 18), justify=\"center\"\n )\n cell.grid(row=i, column=j, padx=1, pady=1)\n row.append(cell)\n self.cells.append(row)\n\n generate_button = tk.Button(\n self.root, text=\"Generate\", command=self.generate_puzzle\n )\n generate_button.grid(row=9, column=0, columnspan=3, padx=10, pady=5)\n\n solve_button = 
tk.Button(self.root, text=\"Solve\", command=self.solve_sudoku)\n solve_button.grid(row=9, column=3, columnspan=3, padx=10, pady=5)\n\n verify_button = tk.Button(\n self.root, text=\"Verify\", command=self.verify_solution\n )\n verify_button.grid(row=9, column=6, columnspan=3, padx=10, pady=5)\n\n clear_button = tk.Button(self.root, text=\"Clear\", command=self.clear_grid)\n clear_button.grid(row=10, column=0, columnspan=9, padx=10, pady=5)\n\n self.difficulty_var = tk.StringVar()\n self.difficulty_var.set(\"Easy\")\n difficulty_menu = tk.OptionMenu(\n self.root, self.difficulty_var, \"Easy\", \"Medium\", \"Hard\"\n )\n difficulty_menu.grid(row=11, column=0, columnspan=9, padx=10, pady=5)\n\n def generate_puzzle(self):\n self.clear_grid()\n\n self.solution = self.generate_sudoku_solution() # Store the solution\n self.solution_backup = [\n row[:] for row in self.solution\n ] # Store a backup of the solution\n\n difficulty = self.difficulty_var.get()\n if difficulty == \"Medium\":\n num_to_remove = 40\n elif difficulty == \"Hard\":\n num_to_remove = 50\n else:\n num_to_remove = 30\n\n self.fill_cells(num_to_remove)\n self.update_grid_with_solution()\n\n def update_grid_with_solution(self):\n for i in range(9):\n for j in range(9):\n value = self.solution[i][j]\n if value != 0:\n self.cells[i][j].insert(0, str(value))\n self.cells[i][j].config(state=\"disabled\")\n\n def generate_sudoku_solution(self):\n n = 9\n\n def is_valid(board, row, col, num):\n for i in range(n):\n if board[row][i] == num or board[i][col] == num:\n return False\n\n start_row, start_col = 3 * (row // 3), 3 * (col // 3)\n for i in range(3):\n for j in range(3):\n if board[start_row + i][start_col + j] == num:\n return False\n return True\n\n def solve(board):\n for row in range(n):\n for col in range(n):\n if board[row][col] == 0:\n nums = list(range(1, n + 1))\n random.shuffle(nums)\n for num in nums:\n if is_valid(board, row, col, num):\n board[row][col] = num\n if solve(board):\n return True\n board[row][col] = 0\n return False\n return True\n\n board = [[0 for _ in range(n)] for _ in range(n)]\n solve(board)\n return board\n\n def fill_cells(self, num_to_remove):\n cells_to_remove = set()\n\n while len(cells_to_remove) < num_to_remove:\n row = random.randint(0, 8)\n col = random.randint(0, 8)\n if (row, col) not in cells_to_remove and self.solution[row][col] != 0:\n cells_to_remove.add((row, col))\n\n for row, col in cells_to_remove:\n self.solution[row][col] = 0\n self.cells[row][col].delete(0, tk.END)\n self.cells[row][col].config(state=\"normal\")\n\n def solve_sudoku(self):\n if self.solution_backup:\n for i in range(9):\n for j in range(9):\n if self.cells[i][j].get() == \"\":\n self.cells[i][j].delete(0, tk.END)\n self.cells[i][j].insert(0, str(self.solution_backup[i][j]))\n\n def verify_solution(self):\n user_solution = []\n for i in range(9):\n row = []\n for j in range(9):\n value = self.cells[i][j].get()\n if value == \"\":\n row.append(0)\n else:\n try:\n int_value = int(value)\n row.append(int_value)\n except ValueError:\n messagebox.showerror(\n \"Verification\", \"Please enter integers in all cells.\"\n )\n return\n user_solution.append(row)\n\n if self.is_valid_solution(user_solution):\n messagebox.showinfo(\n \"Verification\", \"Congratulations! 
You've solved the puzzle.\"\n )\n else:\n messagebox.showerror(\n \"Verification\", \"Sorry, the puzzle solution is incorrect.\"\n )\n\n def is_valid_solution(self, solution):\n def is_valid_unit(unit):\n unit_numbers = [num for num in unit if num != 0]\n return (\n len(unit_numbers) == len(set(unit_numbers))\n and all(1 <= num <= 9 for num in unit_numbers)\n and sum(unit_numbers) == sum(range(1, 10))\n )\n\n # Check rows\n for row in solution:\n if not is_valid_unit(row):\n return False\n\n # Check columns\n for col in range(9):\n column = [solution[row][col] for row in range(9)]\n if not is_valid_unit(column):\n return False\n\n # Check subgrids (3x3 boxes)\n for i in range(0, 9, 3):\n for j in range(0, 9, 3):\n subgrid = [\n solution[row][col]\n for row in range(i, i + 3)\n for col in range(j, j + 3)\n ]\n if not is_valid_unit(subgrid):\n return False\n\n return True\n\n def clear_grid(self):\n for i in range(9):\n for j in range(9):\n self.cells[i][j].delete(0, tk.END)\n self.cells[i][j].config(state=\"normal\")\n self.solution[i][j] = 0\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n app = SudokuApp(root)\n root.resizable(width=False, height=False)\n root.mainloop()\n","repo_name":"Niravanaa/SudokuPuzzleSolver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1673626936","text":"from crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Row, Column, Field, Submit, HTML, Div, Button\nfrom django import forms\nfrom django.urls import reverse_lazy, reverse\n\nfrom etilog.forms.fields_filter import (DateYearPickerField, DateYearPicker)\nfrom etikicapture.fields import ImpactEventBtns, RowTagsButton, LabelInputRow, TagsButton, \\\n ColDomainSelect, ColTendencySelect\nfrom etilog.forms.forms_filter import NotReqCharF\nfrom etilog.models import ImpactEvent, Company, SubsidiaryOwner, SupplierRecipient, Reference, SustainabilityTag\n\nCSS_COL_CLS = 'col-12 col-lg-6'\n\n_PH_COMPANY = 'e.g. Coca Cola, Apple …'\n_PH_COUNTRY = 'e.g. Switzerland, France … '\n_PH_REFERENCE = 'e.g. New York Times, Guardian …'\n_PH_LANGUAGE = 'e.g. 
English, Deutsch'\n\n\nclass ImpactEventForm(forms.ModelForm):\n \"\"\"\n model form to create an impact event.\n fields default -> validation correct.\n widgets customized.\n \"\"\"\n tags_select = NotReqCharF()\n tags_drop = NotReqCharF()\n\n def __init__(self, request, *args, **kwargs):\n\n super(ImpactEventForm, self).__init__(*args, **kwargs)\n\n self.fields['source_url'].required = True\n\n\n\n # crispy form layout:\n self.helper = FormHelper()\n self.helper.form_id = 'id_impevform'\n #self.helper.form_action = reverse('etikicapture:newimpactevent',)\n self.helper.layout = Layout(\n\n RowTagsButton('source_url', 'col-12',\n taginput=False,\n addmodel=False,\n icon_name='fa fa-glasses',\n placeholder='paste URL',\n autofocus=True),\n\n #first hidden\n Div(\n ImpEvMainFields(request),\n\n Field('comment', rows=3),\n\n Div(\n Row(Column(Field('date_text'), css_class=CSS_COL_CLS)),\n Field('article_title'),\n Field('article_text'),\n Field('article_byline'),\n Field('article_html'),\n Field('result_parse_html'),\n css_class='collapse',\n css_id='div_article_fields'\n ),\n\n ImpactEventBtns(request),\n\n\n style='display: none;', # for fadeIn\n css_id='div_main_fields'\n\n ),\n )\n\n class Meta: # only for model fields\n model = ImpactEvent\n fields = ['source_url',\n # first part hidden\n 'date_published', 'date_impact', 'company', 'reference',\n 'country',\n 'language',\n 'sust_domain', 'sust_tendency', 'sust_tags',\n 'summary',\n # from here only for etikis\n 'comment',\n 'article_text', 'article_title', 'date_text', 'article_byline',\n 'article_html', 'result_parse_html',\n 'user'\n ]\n\n widgets = {\n 'source_url': forms.URLInput(attrs={'placeholder': 'url to the article',\n\n }),\n 'country': forms.TextInput(),\n 'company': forms.TextInput(),\n 'reference': forms.TextInput(),\n 'sust_tags': forms.TextInput(),\n 'tags_select': forms.TextInput(),\n 'tags_drop': forms.TextInput(),\n\n 'sust_domain': forms.TextInput(),\n 'sust_tendency': forms.TextInput(),\n\n 'date_published': DateYearPicker(),\n 'date_impact': DateYearPicker(),\n 'comment': forms.Textarea(),\n 'summary': forms.Textarea(),\n 'article_html': forms.TextInput(),\n }\n\n # if added labels here -> correct required or not\n labels = {\n 'date_published': ('When was it published'),\n 'date_impact': ('When did it happen'),\n 'company': ('Which company was concerned'),\n 'reference': ('Where was it published?'),\n 'country': 'Where did it happen',\n 'sust_domain': 'Which Category?',\n 'sust_tendency': 'Which Tendency?',\n 'sust_tags': '',\n }\n help_texts = {\n 'date_published': (''),\n 'date_impact': ('optional'),\n 'summary': ('optional'),\n 'country': 'optional',\n 'comment': 'optional',\n }\n\n\nclass ImpEvMainFields(Layout):\n def __init__(self, request):\n layout_list = [\n\n\n RowTagsButton('company', 'col-12',\n placeholder=_PH_COMPANY,\n request=request),\n\n LabelInputRow(ColDomainSelect('sust_domain')),\n\n LabelInputRow(ColTendencySelect('sust_tendency')),\n\n LabelInputRow(\n rowcontent=[TagsButton('tags_select', 'col-12 div_tags_select', taginput='c_tags_select',\n addmodel=False),\n Div(HTML('
'), css_class='mx-auto'),\n\n TagsButton('tags_drop', 'col-12 div_tags_drop',\n placeholder='Search Tags',\n taginput='c_tags_search_inp c_tags_drop',\n field_hidden='sust_tags',\n request=request),\n ],\n labelname='Select Sustainability Topics'\n ),\n RowTagsButton('reference', 'col-12',\n placeholder=_PH_REFERENCE,\n request=request),\n\n\n\n\n LabelInputRow(\n Column(\n DateYearPickerField('date_published', 'e.g. 17.08.2003', css_class='',\n data_category='date_from'\n ),\n css_class='col-12 d-flex flex-wrap justify-content-start' # wraps if needed\n ),\n\n ),\n LabelInputRow(\n Column(\n DateYearPickerField('date_impact', 'e.g. 18.12.1999', css_class='',\n data_category='date_to'\n ),\n css_class='col-12 d-flex flex-wrap justify-content-start' # wraps if needed\n )\n\n ),\n\n RowTagsButton('country', 'col-12',\n placeholder=_PH_COUNTRY,\n addmodel=False,\n ),\n Field('language', css_class='col-12',\n placeholder=_PH_LANGUAGE,\n addmodel=False,\n ),\n\n Field('summary', rows=3, placeholder='Short summary of content'),\n\n ]\n super(ImpEvMainFields, self).__init__(*layout_list)\n\n\n_FOREIGN_MODEL_CLS = 'foreignModel'\n\n\nclass CompanyForm(forms.ModelForm):\n '''\n form to create a company\n '''\n\n #queryset needs to all so it gets correctly validated\n owner = forms.ModelMultipleChoiceField(queryset=Company.objects.all(),\n required=False) # for validation\n subsidiary = forms.ModelMultipleChoiceField(queryset=Company.objects.all(),\n required=False)\n supplier = forms.ModelMultipleChoiceField(queryset=Company.objects.all(),\n required=False)\n recipient = forms.ModelMultipleChoiceField(queryset=Company.objects.all(),\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(CompanyForm, self).__init__(*args, **kwargs)\n\n # as they are not model fields -> widget needs to be adapet here\n self.fields['owner'].widget = forms.TextInput()\n self.fields['subsidiary'].widget = forms.TextInput()\n self.fields['supplier'].widget = forms.TextInput()\n self.fields['recipient'].widget = forms.TextInput()\n\n self.helper = FormHelper(self)\n self.helper.form_class = _FOREIGN_MODEL_CLS\n self.helper.form_action = reverse_lazy('etikicapture:add_foreignmodel',\n kwargs={'main_model': 'impev',\n 'foreign_model': 'company'})\n\n self.helper.layout = Layout(\n RowTagsButton('name', 'col-12',\n taginput=False,\n addmodel=False,\n placeholder=_PH_COMPANY,),\n\n RowTagsButton('country', 'col-12',\n placeholder=_PH_COUNTRY,\n addmodel=False,\n ),\n\n Field('activity'),\n Field('comment', rows=3),\n\n RowTagsButton('owner', 'col-12',\n placeholder=_PH_COMPANY,\n addmodel=False,),\n RowTagsButton('subsidiary', 'col-12',\n placeholder=_PH_COMPANY,\n addmodel=False,),\n RowTagsButton('supplier', 'col-12',\n placeholder=_PH_COMPANY,\n addmodel=False,),\n RowTagsButton('recipient', 'col-12',\n placeholder=_PH_COMPANY,\n addmodel=False,),\n\n Submit('submit-name', '', css_id='id_submit_fm', css_class='d-none'),\n\n )\n\n def save(self, commit=True):\n main = self.instance\n main.save() # save main instance\n # add relations:\n if self.cleaned_data['subsidiary'] is not None:\n for comp in self.cleaned_data['subsidiary']: # comes as queryset\n SubsidiaryOwner.objects.update_or_create(owner_company=main,\n subsidiary_company=comp)\n if self.cleaned_data['owner'] is not None:\n for comp in self.cleaned_data['owner']: # comes as queryset\n SubsidiaryOwner.objects.update_or_create(owner_company=comp,\n subsidiary_company=main)\n if self.cleaned_data['supplier'] is not None:\n for comp in 
self.cleaned_data['supplier']: # comes as queryset\n SupplierRecipient.objects.update_or_create(recipient_company=main,\n supplier_company=comp)\n if self.cleaned_data['recipient'] is not None:\n for comp in self.cleaned_data['recipient']: # comes as queryset\n SupplierRecipient.objects.update_or_create(recipient_company=comp,\n supplier_company=main)\n return main\n\n class Meta: # only for model fields\n model = Company\n exclude = ['owner_old', 'subsidiary_old', 'supplier_old', 'recipient_old',\n 'subsidiary_to_owner', 'supplier_to_recipient'\n ]\n\n widgets = {\n\n 'country': forms.TextInput(),\n\n\n\n 'comment': forms.Textarea(),\n\n }\n\n\nclass ReferenceForm(forms.ModelForm):\n '''\n form to create a reference\n '''\n\n def __init__(self, *args, **kwargs):\n super(ReferenceForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.form_class = _FOREIGN_MODEL_CLS\n self.helper.form_action = reverse_lazy('etikicapture:add_foreignmodel',\n kwargs={'main_model': 'impev',\n 'foreign_model': 'reference'})\n self.helper.layout = Layout(\n\n RowTagsButton('name', 'col-12',\n taginput=False,\n addmodel=False,\n placeholder=_PH_REFERENCE, ),\n\n Field('mediaform'),\n\n RowTagsButton('company', 'col-12',\n addmodel=False,\n placeholder=_PH_COMPANY),\n\n RowTagsButton('country', 'col-12',\n placeholder=_PH_COUNTRY,\n addmodel=False,\n ),\n\n Field('comment', rows=3),\n Submit('submit-name', '', css_id='id_submit_fm', css_class='d-none'),\n\n\n )\n\n class Meta: # only for model fields\n model = Reference\n exclude = []\n\n widgets = {\n\n 'company': forms.TextInput(),\n 'country': forms.TextInput(),\n\n 'comment': forms.Textarea(),\n\n }\n\n\nclass TopicTagsForm(forms.ModelForm):\n '''\n form to create a topic tag\n '''\n\n def __init__(self, *args, **kwargs):\n super(TopicTagsForm, self).__init__(*args, **kwargs)\n\n self.fields['sust_domains'].required = True\n self.fields['sust_tendency'].required = True\n\n self.helper = FormHelper(self)\n self.helper.form_class = _FOREIGN_MODEL_CLS\n self.helper.form_action = reverse_lazy('etikicapture:add_foreignmodel',\n kwargs={'main_model': 'impev',\n 'foreign_model': 'tags'})\n self.helper.layout = Layout(\n\n RowTagsButton('name', 'col-12',\n taginput=False,\n addmodel=False,\n placeholder='e.g. 
Reporting, Child Labor', ),\n\n LabelInputRow(ColDomainSelect('sust_domains', id_prefix='fmodel_', field_css_class='many-values')),\n\n LabelInputRow(ColTendencySelect('sust_tendency', id_prefix='fmodel_')),\n\n\n Field('comment', rows=3),\n Field('description', rows=3),\n Submit('submit-name', '', css_id='id_submit_fm', css_class='d-none'),\n\n\n )\n\n class Meta: # only for model fields\n model = SustainabilityTag\n exclude = ['impnr', ]\n\n widgets = {\n\n 'sust_domains': forms.TextInput(),\n 'sust_tendency': forms.TextInput(),\n\n 'comment': forms.Textarea(),\n 'description': forms.Textarea(),\n\n }","repo_name":"hodeld/etiki-prototype1","sub_path":"etikicapture/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":14343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"40909520307","text":"import os\nfrom bs4 import BeautifulSoup as bs\nfrom bs4.builder import HTML \nimport re\nimport json\nimport string\nfrom nltk.downloader import update\nimport nltk\n\nimport DataCleanup as dc\n\ndef getNames():\n # initialize the dictionaries\n adjectives = {}\n adverbs = {}\n names = {}\n # read the exceptions\n with open(os.path.join('Data','pos','exceptions.json'),'r',encoding='utf-8') as f:\n text = f.read().replace('\\xad','')\n d = json.loads(text)\n ex = d.keys()\n # # read the files\n with open(os.path.join('Data','REFINED','master.txt'),'r',encoding='utf-8') as f:\n text = f.read().replace('\\xad','')\n # build the named entities\n entities = dc.getNamedEntities(text)\n # iterate over the entities to assign them to the dictionaries\n for e in entities:\n # adjective\n if 'JJ' in e: \n key = e[0]\n value = e[1]\n adjectives.update({key:value})\n # adjective\n if 'JJS' in e:\n key = e[0]\n value = e[1]\n adjectives.update({key:value})\n # adjective\n if 'JJR' in e:\n key = e[0]\n value = e[1]\n adjectives.update({key:value})\n # adverb\n if 'RB' in e: \n key = e[0]\n value = e[1]\n adverbs.update({key:value})\n # adverb\n if 'RBR' in e:\n key = e[0]\n value = e[1]\n adverbs.update({key:value})\n # adverb\n if 'RBS' in e:\n key = e[0]\n value = e[1]\n adverbs.update({key:value})\n # names\n if hasattr(e,'label') and e[0][0] not in ex:\n key = e[0][0]\n value = e.label()\n names.update({key:value})\n print(names)\n # populate the JSON files\n path = os.path.join('Data','pos')\n with open(os.path.join(path,'adjectives.json'),'w',encoding='utf-8') as f:\n json.dump(adjectives,f,indent=4,ensure_ascii=False)\n print('json populated')\n with open(os.path.join(path,'adverbs.json'),'w',encoding='utf-8') as f:\n json.dump(adverbs,f,indent=4,ensure_ascii=False)\n print('json populated')\n with open(os.path.join(path,'names.json'),'w',encoding='utf-8') as f:\n json.dump(names,f,indent=4,ensure_ascii=False)\n print('json populated')\n\ndef getDescriptives():\n # JSON file recording, as a binary flag, whether the adjectives and adverbs that were found are descriptive\n with open(os.path.join('Data','REFINED','master.txt'),'r',encoding='utf-8') as f:\n text = f.read().replace('\\xad','')\n\n entities = dc.getNamedEntities(text)\n \n descriptors = {}\n for e in entities:\n if 'JJ' in e:\n key = e[0]\n descriptors.update({key:0})\n if 'JJS' in e:\n key = e[0]\n descriptors.update({key:0})\n if 'JJR' in e:\n key = e[0]\n descriptors.update({key:0})\n if 'RB' in e:\n key = e[0]\n descriptors.update({key:0})\n if 'RBR' in e:\n key = e[0]\n descriptors.update({key:0})\n if 'RBS' in e:\n key = e[0]\n descriptors.update({key:0})\n \n path = os.path.join('Data','pos')\n with open(os.path.join(path,'isdescriptive.json'),'w',encoding='utf-8') as f:\n json.dump(descriptors,f,indent=4,ensure_ascii=False)\n print('json populated')\n\ndef getSentences():\n # read the names\n # define variables\n n = {}\n names = []\n with open(os.path.join('Data','pos','names.json'),'r',encoding='utf-8') as f:\n content = f.read().replace('\\xad','') # \\xad = soft hyphen\n n = json.loads(content)\n for key in n.keys():\n names.append(key.lower())\n # read the adjectives and adverbs\n # read all texts\n for i in range(0,7):\n filename = 'sentenceswithnames'+str(i)+'.txt'\n with open(os.path.join('Data','REFINED',str(i)+'.txt'),'r',encoding='utf-8') as f:\n content = f.read().replace('\\xad','') \n # split into sentences\n sentences = nltk.sent_tokenize(content,language='english')\n # define variables\n filtered_text = ''\n # create the files\n if os.path.exists(os.path.join('Data','RESULTS',filename)):\n with open(os.path.join('Data','RESULTS',filename),'w',encoding='utf-8') as f:\n f.write('')\n for sentence in sentences:\n seq = nltk.word_tokenize(sentence.lower())\n l_seq = len(seq)-1\n filtered_text += findWordInSentence(sentence,seq,names,l_seq)\n with open(os.path.join('Data','RESULTS',filename),'a',encoding='utf-8') as f:\n f.write(filtered_text)\n\ndef findWordInSentence(sentence,seq,comparison:list,i):\n if seq[i] in comparison:\n return sentence+'\\n\\n'\n elif i>0:\n return findWordInSentence(sentence,seq,comparison,i-1)\n else:\n return ''","repo_name":"tessajo/SexistHarryPotter","sub_path":"DataExtraction.py","file_name":"DataExtraction.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34251377650","text":"import time\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nimport numpy as np\nfrom sklearn import svm\nfrom tqdm import tqdm\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.feature_extraction import text\nfrom nltk.stem import porter\nfrom sklearn.decomposition import PCA, TruncatedSVD\nimport matplotlib.pyplot as plt\nfrom nltk.corpus import stopwords\nfrom sklearn.preprocessing import normalize\n\nfrom NLPLib.DSP import CleanedDataset1, Dataset1\n\ndef analyse_ds(dataset:Dataset1, use_idf:bool=False, n_grams:int=1, additional_stop_words:set={}, max_features:int=5000):\n stop_words = text.ENGLISH_STOP_WORDS.union(additional_stop_words) # To add more stop words\n\n stemmer = porter.PorterStemmer()\n pattern = r\"\\w*[a-z]+\"\n\n analyzer = CountVectorizer(token_pattern=pattern, ngram_range=(n_grams, n_grams), max_features=max_features,lowercase=True, stop_words=stop_words).build_analyzer()\n stem_analyzer = lambda doc: (stemmer.stem(w) for w in analyzer(doc))\n \n # pattern=r\"(?u)\\b\\w\\w+\\b\"\n CountVec = CountVectorizer(analyzer=stem_analyzer)\n \n X = CountVec.fit_transform(tqdm(dataset.getSample(returnLabel=False), total=dataset.getLength()))\n return CountVec.get_feature_names_out(), X\n\nif __name__ == \"__main__\":\n np.random.seed(10)\n\n \n\n n_word = 10\n stop_word = {'reuters'}\n dataset = Dataset1()\n # features_names, X = analyse_ds(dataset, max_features=n_word, additional_stop_words=stop_word)\n\n # word_freq = X.toarray().sum(axis=0)\n\n # perm = np.argsort(word_freq)[-n_word:] # Selecting n_word most frequent words\n # word_freq = word_freq[perm]\n # features_names = features_names[perm]\n\n # print(features_names)\n # print(word_freq)\n\n # 
plt.bar(x=features_names, height=word_freq)\n # plt.show()\n\n # X_reduce = TruncatedSVD(n_components=5).fit_transform(normalize(X))\n # labels = np.array([i for i in dataset.getLabel()])\n\n # perm_labels = np.argsort(labels)\n # labels = labels[perm_labels]\n # X_reduce = X_reduce[perm_labels, :]\n\n # X_reduce_1 = X_reduce[np.where(labels)]\n # X_reduce_2 = X_reduce[:np.where(labels)[0][0]]\n\n # plt.scatter(X_reduce_1[:, 0], X_reduce_1[:, 1], marker='x')\n # plt.scatter(X_reduce_2[:, 0], X_reduce_2[:, 1], marker='o')\n # plt.show()\n \n\n textSizesTrue = []\n textSizesFake = []\n nbText = 0\n\n for (sample, label) in tqdm(dataset.getSample(training=True, testing=True), total=dataset.getLength()):\n sentence = sample.split()\n \n if label == 1: textSizesTrue.append(len(sentence))\n else: textSizesFake.append(len(sentence))\n \n\n textSizes = textSizesFake + textSizesTrue\n print(f\"Average number of words: {np.mean(np.array(textSizes))} words\")\n print(f\"Variance of the number of words: {np.std(np.array(textSizes))}\")\n print(f\"Minimum number of words: {np.min(np.array(textSizes))} words\")\n print(f\"Maximum number of words: {np.max(np.array(textSizes))} words\")\n\n\n print(f\"Number of sentences longer than 3000 words: {np.sum(np.array(textSizes) > 3000)}\")\n print(f\"Number of sentences shorter than 5 words: {np.sum(np.array(textSizes) <= 5)}\")\n\n plt.title(\"Length of the True articles.\")\n plt.hist(textSizesTrue, bins=200)\n plt.show()\n plt.title(\"Length of the Fake articles.\")\n plt.hist(textSizesFake, bins=200)\n plt.show()\n\n \n\n","repo_name":"Fake-News-Detection-Project/Fake-News-Detection","sub_path":"dataset_analysis.py","file_name":"dataset_analysis.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"70471104053","text":"# Data Wrangling?\n# An informal term that broadly refers to the process of cleaning raw data and transforming it into a usable form\n\nimport pandas as pd\n\n# Data url\nurl = 'https://raw.githubusercontent.com/chrisalbon/simulated_datasets/master/titanic.csv'\n\n# Data loading into dataframe\ndataframe = pd.read_csv(url)\n\nprint(dataframe.shape)\n\n# print(dataframe.head(5))","repo_name":"jeewonkimm2/scikitlearn_review","sub_path":"from_scratch/ch3_data_wrangling/0_Intro.py","file_name":"0_Intro.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"194678304","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport cv2\nimport json\n\nanno_file_path = r'C:\\Users\\38698\\work_space\\data\\20220929101743\\val\\via_region_data.json'\n\nsaveFolder = r'C:\\Users\\38698\\work_space\\data\\showroom\\labels\\val'\nif not os.path.exists(saveFolder):\n os.makedirs(saveFolder)\n\nannotations = json.load(open(anno_file_path, 'r', encoding='UTF-8'))\nimgs = annotations # [\"_via_img_metadata\"]\n\nobjClass = 0\nmodel_name = os.path.abspath('..').split('/')[-1]\n\n# iterate over each image\nfor imgId in imgs:\n filename = imgs[imgId]['filename']\n imgName = filename.split('.')[0]\n # print('filename:', filename)\n regions = imgs[imgId]['regions']\n # if len(regions) <= 0:\n # continue\n img_dir = \"\"\n for idx, i in enumerate(anno_file_path.split('\\\\')):\n if idx != len(anno_file_path.split('\\\\')) - 1:\n img_dir += i\n img_dir += '/'\n img_dir += filename\n img = cv2.imread(img_dir)\n WIDTH = img.shape[1]\n HEIGHT = img.shape[0]\n data = ''\n # iterate over each region\n for region in regions:\n # print(region)\n shape = 
region['shape_attributes']\n x = shape['all_points_x']\n y = shape['all_points_y']\n # boxW = shape['width']\n # boxH = shape['height']\n\n # minX = int(x)\n # minY = int(y)\n # maxX = int(x + boxW)\n # maxY = int(y + boxH)\n minX = min(x)\n minY = min(y)\n maxX = max(x)\n maxY = max(y)\n\n centerX = round((minX + maxX) / 2 / WIDTH, 6)\n centerY = round((minY + maxY) / 2 / HEIGHT, 6)\n w = round((maxX - minX) / WIDTH, 6)\n h = round((maxY - minY) / HEIGHT, 6)\n\n data = data + f'{objClass} {centerX} {centerY} {w} {h}\\n'\n file = open(f'{saveFolder}/{imgName}.txt', 'w')\n file.write(data[:-1])\n # file.write(data)\n file.close()\n","repo_name":"zyan-repository/work_scripts","sub_path":"dataset/via_to_yolov5.py","file_name":"via_to_yolov5.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"42081480676","text":"import os\nimport sys\n\nimport numpy as np\nfrom PIL import Image\n\n\ndef get_class(coco_name_path):\n \"\"\"\n get class name list\n \"\"\"\n classes_path = os.path.expanduser(coco_name_path)\n with open(classes_path) as file:\n class_names = file.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n\ndef get_resultpath(data_info_path, result_path,\n save_path, coco_name_path,\n imagenet_pic_path):\n \"\"\"\n get result path\n \"\"\"\n name_list = []\n\n with open(data_info_path, 'r') as file:\n for line in file:\n line = line.strip('\\n')\n line = line.split(' ')\n img_name = line[1].split('/')[-1].strip('.bin')\n name_list.append(img_name)\n\n for name in name_list:\n image = Image.open(imagenet_pic_path + '/' + name + '.jpg')\n im_w, im_h = image.size\n\n num_detections = np.fromfile(result_path + '/' + name + '_1.bin', np.float32)\n num_detections = num_detections.astype(np.int16)\n index = num_detections[0]\n out_boxes = np.fromfile(result_path + '/' + name + '_3.bin', np.float32)\n out_boxes = np.reshape(out_boxes, (100, 4))\n out_scores = np.fromfile(result_path + '/' + name + '_2.bin', np.float32)\n out_scores = np.reshape(out_scores, (100))\n out_classes = np.fromfile(result_path + '/' + name + '_4.bin', np.float32)\n out_classes = out_classes.astype(np.int16)\n out_classes = np.reshape(out_classes, (100))\n out_classes = out_classes[:index]\n print(\"out_classes\", out_classes)\n print(\"shape\", out_boxes.shape, out_classes.shape, out_scores.shape, num_detections)\n print('Found {} boxes for {}'.format(len(out_boxes), 'img')) # prompt for found number of bbox\n file = open(save_path + '/' + name + '.txt', 'w')\n print(\"out_scores\", out_scores)\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = get_class(coco_name_path)\n predicted_class = predicted_class[c]\n box = out_boxes[i]\n score = out_scores[i]\n print(\"out_classes_scores\", c, predicted_class, score)\n top1, left1, bottom, right = box\n top = top1 * im_h\n left = left1 * im_w\n bottom = (bottom - top1) * im_h\n right = (right - left1) * im_w\n # write detected pos\n file.write(predicted_class + ' ' + str(score) + ' ' + str(left) + ' '\n + str(top) + ' ' + str(right + left) + ' ' + str(\n bottom + top) + '\\n')\n file.close()\n\n\nif __name__ == '__main__':\n \"\"\"\n :param data_info_path: path of benchmark data.info\n :param result_path: path of the benchmark generated result, usually ./result/dumpOutput/\n :param save_path: path to save the generated result\n :param coco_name_path: path of coco.names\n :param imagenet_pic_path: images path of coco 
dataset\n sample as:\n python3 ./postprocess_ssd_mobilenet_v1_fpn.py\n /home/Benchmark_autotest/dataset/coco_2014_bin.info\n /home/Benchmark_autotest/result/dumpOutput/\n /home/Benchmark_autotest/dataset/script/SSD/pre_txt.txt\n /home/Benchmark_autotest/dataset/script/images/coco.names\n /root/dataset/coco2014/val2014\n \"\"\"\n if len(sys.argv) < 6:\n raise Exception(\"usage: python3 xxx.py [data_info_path] [result_path] [save_path] \"\n \"[coco_name_path] [imagenet_pic_path]\")\n data_info_path = sys.argv[1]\n result_path = sys.argv[2]\n save_path = sys.argv[3]\n coco_name_path = sys.argv[4]\n imagenet_pic_path = sys.argv[5]\n\n data_info_path = os.path.realpath(data_info_path)\n result_path = os.path.realpath(result_path)\n save_path = os.path.realpath(save_path)\n coco_name_path = os.path.realpath(coco_name_path)\n imagenet_pic_path = os.path.realpath(imagenet_pic_path)\n\n if not os.path.isdir(save_path):\n os.makedirs(save_path)\n\n get_resultpath(data_info_path, result_path, save_path, coco_name_path, imagenet_pic_path)\n","repo_name":"Ascend/tools","sub_path":"cann-benchmark_infer_scripts/scripts/ssd-mobilenetv1-fpn_tf_postprocess.py","file_name":"ssd-mobilenetv1-fpn_tf_postprocess.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"}
{"seq_id":"74966625331","text":"import streamlit as st\nfrom PIL import Image\nimport pandas as pd\nimport numpy as np \n\nst.header('10 Cool Beginner Python Tricks That Will Make Your Life Easier')\n\n\nimage = Image.open('minion.jpg')\n\nst.image(image, caption='Sunrise by the mountains')\n\nst.title('1. Walrus operator')\nst.caption('Example')\nst.text('The Walrus or := operator is one of the latest additions in Python 3.8. It is an assignment operator that lets you assign a value to a variable within an expression like conditional statements, loops, etc.')\n\ncode = '''mylist = [1, 2, 3]\nif (l := len(mylist)) > 2:\n    print(l)'''\nst.code(code, language='python')\nst.caption('Output')\ncode2 = ''' 3 '''\nst.code(code2, language='python')\n\n\n\nst.write('st.dataframe displays a dataframe as an interactive table.')\n\ndf = pd.DataFrame(\n np.random.randn(50, 20),\n columns=('col %d' % i for i in range(20)))\n\nst.dataframe(df) # Same as st.write(df)\n\nst.dataframe(df, 200, 100)\nst.write('You can also pass a Pandas Styler object to change the style of the rendered DataFrame:')\ndf = pd.DataFrame(\n np.random.randn(10, 20),\n columns=('col %d' % i for i in range(20)))\n\nst.dataframe(df.style.highlight_max(axis=0))\n\n","repo_name":"YMacA/bootcapm-datascience","sub_path":"firstpyapp.py","file_name":"firstpyapp.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"9932916944","text":"# Project Euler\n# Problem 7\n\n# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,\n# we can see that the 6th prime is 13.\n\n# What is the 10 001st prime number?\n\n# https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes\n\nfrom math import *\n\ndef sieve_of_eratosthenes(nth, limit = 10000000):\n\tprime_count = 0\n\tprimes = [True] * limit\n\tprimes[0] = primes[1] = False\n\n\tfor (i, is_prime) in enumerate(primes):\n\t\t if is_prime:\n\t\t \tprime_count += 1\n\t\t \tfor j in range(i*i, limit, i):\n\t\t \t\tprimes[j] = False\n\t\t \tif prime_count == nth:\n\t\t \t\tprime = i\n\t\t \t\tbreak\n\treturn 
prime\n\nprint(sieve_of_eratosthenes(10001))\n","repo_name":"ysx001/project_euler","sub_path":"python/p_007.py","file_name":"p_007.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8548258276","text":"# coding: utf-8\n\nimport trimesh\n\n\ndef show_mesh():\n # attach to logger so trimesh messages will be printed to console\n trimesh.util.attach_to_log()\n\n # mesh objects can be created from existing faces and vertex data\n mesh = trimesh.Trimesh(vertices=[[0, 0, 0], [0, 0, 1], [0, 1, 0]],\n faces=[[0, 1, 2]])\n\n # by default, Trimesh will do a light processing, which will\n # remove any NaN values and merge vertices that share position\n # if you want to not do this on load, you can pass `process=False`\n mesh = trimesh.Trimesh(vertices=[[0, 0, 0], [0, 0, 1], [0, 1, 0]],\n faces=[[0, 1, 2]],\n process=False)\n # preview mesh in an opengl window if you installed pyglet and scipy with pip\n mesh.show() # pip install pyglet -i https://pypi.doubanio.com/simple\n\n\ndef main():\n show_mesh()\n\n\nif __name__ == '__main__':\n main()\n\n\n# References:\n# https://github.com/mikedh/trimesh\n","repo_name":"myd7349/Ongoing-Study","sub_path":"python/trimesh/hello_trimesh.py","file_name":"hello_trimesh.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"42923889237","text":"\"\"\"\nSolver and parser for Assignment A\n\"\"\"\n\n\ndef string_separator(equation):\n \"\"\"\n Converts the list of strings into a list of integers where operators are represented as a negative integer,\n and numbers as positive integers.\n It also connects adjacent numbers into a single two or more digit number\n\n Input: List of strings in which each string represents a prediction made by the model\n Output: List of integers explained above\n \"\"\"\n eltype = []\n intcheck = False\n for elem in equation:\n if elem == '0' or elem == '1' or elem == '2' or elem == '3' \\\n or elem == '4' or elem == '5' or elem == '6' or elem == '7' or elem == '8' or elem == '9':\n if intcheck:\n eltype[-1] = eltype[-1] * 10 + int(elem)\n else:\n eltype.append(int(elem))\n intcheck = True\n elif elem == '+' or elem == '\\\\pm':\n eltype.append(-1)\n intcheck = False\n elif elem == '-' or elem == '\\\\neg':\n eltype.append(-2)\n intcheck = False\n elif elem == '/':\n eltype.append(-3)\n intcheck = False\n elif elem == '\\\\times':\n eltype.append(-4)\n intcheck = False\n elif elem == '\\\\{' or elem == '\\\\iota' or elem == '\\\\langle' \\\n or elem == '\\\\llbracket' or elem == '\\\\ell' or elem == '[':\n eltype.append(-5)\n intcheck = False\n elif elem == '\\\\gamma' or elem == '\\\\rangle' or elem == '\\\\}' \\\n or elem == 'j' or elem == ']' or elem == '\\\\rrbracket':\n eltype.append(-6)\n intcheck = False\n\n return eltype\n\n\ndef brackets(eltype):\n \"\"\"\n Checks if the eqation contains brackets\n\n Input: Equation in the form of an integer list\n Output: True if there are brackets in the equation\n False if there are no brackets in the equaiton\n \"\"\"\n for e in range(0, len(eltype)):\n if eltype[e] == -5 or eltype[e] == -6:\n return True\n return False\n\n\ndef printeq(equation):\n \"\"\"\n Prints the equation\n\n Input: List of integers where every integer is represented with a positive integer,\n and an operation with a negative integer\n Output: Prints the equation\n \"\"\"\n string = []\n for e in equation:\n if e >= 0:\n 
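# non-negative entries are operands; negative codes are the operator markers assigned in string_separator\n 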
string.append(str(e))\n if e == -1:\n string.append('+')\n if e == -2:\n string.append('-')\n if e == -3:\n string.append('/')\n if e == -4:\n string.append('*')\n if e == -5:\n string.append('(')\n if e == -6:\n string.append(')')\n print(''.join(map(str, string)))\n\n\ndef bracketSplit(equation):\n \"\"\"\n Called when there is a bracket in the equation,\n it calculates the inside of the bracket and prints it\n\n Input: The full equation (Only if it contains brackets)\n Output: The equation with the brackets calculated\n \"\"\"\n leftb = 0\n rightb = len(equation) - 1\n for e in range(0, len(equation)):\n if equation[e] == -5:\n leftb = e\n if equation[e] == -6:\n rightb = e\n break\n inside = []\n for c in range(leftb + 1, rightb):\n inside.append(equation[c])\n\n result = calculate(inside)\n del equation[leftb:(rightb + 1)]\n equation.insert(leftb, result)\n printeq(equation)\n if brackets(equation):\n bracketSplit(equation)\n\n return equation\n\n\ndef calculate(eq):\n \"\"\"\n Calculates the whole equation if it doesn't have brackets,\n otherwise, the input is the part of the equation inside of the bracket.\n\n Input: List of integers representing elements of the equation\n Output: Solution of the equation\n \"\"\"\n if len(eq) == 1:\n return eq[0]\n elif len(eq) == 2:\n return eq[1] * -1\n for e in range(0, len(eq) - 2):\n if eq[e + 1] == -4:\n if eq[e + 2] == -2:\n eq[e] = eq[e] * (eq[e + 3] * -1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n\n else:\n eq[e] = eq[e] * eq[e + 2]\n eq.pop(e + 1)\n eq.pop(e + 1)\n elif eq[e + 1] == -3:\n if eq[e + 2] == -2:\n eq[e] = eq[e] / (eq[e + 3] * -1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n else:\n eq[e] = eq[e] / eq[e + 2]\n eq.pop(e + 1)\n eq.pop(e + 1)\n\n for e in range(0, len(eq) - 2):\n if eq[e + 1] == -1:\n if eq[e + 2] == -2:\n eq[e] = eq[e] + (eq[e + 3] * -1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n else:\n eq[e] = eq[e] + eq[e + 2]\n eq.pop(e + 1)\n eq.pop(e + 1)\n elif eq[e + 1] == -2:\n if eq[e + 2] == -2:\n eq[e] = eq[e] + eq[e + 3]\n eq.pop(e + 1)\n eq.pop(e + 1)\n eq.pop(e + 1)\n else:\n eq[e] = eq[e] - eq[e + 2]\n eq.pop(e + 1)\n eq.pop(e + 1)\n return eq[0]\n\n\ndef solve(str_eq):\n \"\"\"\n Final function of the parser/solver.\n\n Input: List of strings (model predictions)\n Output: 
Prints the full equation\n Prints each step of solving (if the equation contains brackets)\n Prints the final result\n \"\"\"\n final = string_separator(str_eq)\n printeq(final)\n if brackets(final):\n final = bracketSplit(final)\n print(calculate(final))\n","repo_name":"gdamiani1/AssignmentA-main","sub_path":"eq_parser.py","file_name":"eq_parser.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"11936162541","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport math\nimport pandas as pd\nfrom io import StringIO\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.signal import savgol_filter, find_peaks, minimum_phase, firwin2\nfrom scipy.special import expit\nfrom scipy.stats import linregress\nfrom scipy.fftpack import next_fast_len\nimport numpy as np\nimport urllib\nfrom time import time\nfrom tabulate import tabulate\nfrom PIL import Image\nimport re\nimport warnings\nimport biquad\nfrom constants import DEFAULT_F_MIN, DEFAULT_F_MAX, DEFAULT_STEP, DEFAULT_MAX_GAIN, DEFAULT_TREBLE_F_LOWER, \\\n DEFAULT_TREBLE_F_UPPER, DEFAULT_TREBLE_MAX_GAIN, DEFAULT_TREBLE_GAIN_K, DEFAULT_SMOOTHING_WINDOW_SIZE, \\\n DEFAULT_SMOOTHING_ITERATIONS, DEFAULT_TREBLE_SMOOTHING_F_LOWER, DEFAULT_TREBLE_SMOOTHING_F_UPPER, \\\n DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE, DEFAULT_TREBLE_SMOOTHING_ITERATIONS, DEFAULT_TILT, DEFAULT_FS, \\\n DEFAULT_F_RES, DEFAULT_BASS_BOOST_GAIN, DEFAULT_BASS_BOOST_FC, \\\n DEFAULT_BASS_BOOST_Q, DEFAULT_GRAPHIC_EQ_STEP, HARMAN_INEAR_PREFENCE_FREQUENCIES, \\\n HARMAN_ONEAR_PREFERENCE_FREQUENCIES\n\n\nclass FrequencyResponse:\n def __init__(self,\n name=None,\n frequency=None,\n raw=None,\n error=None,\n smoothed=None,\n error_smoothed=None,\n equalization=None,\n parametric_eq=None,\n fixed_band_eq=None,\n equalized_raw=None,\n equalized_smoothed=None,\n target=None):\n if not name:\n raise TypeError('Name must be a non-empty string.')\n self.name = name.strip()\n\n self.frequency = self._init_data(frequency)\n if not len(self.frequency):\n self.frequency = self.generate_frequencies()\n\n self.raw = self._init_data(raw)\n self.smoothed = self._init_data(smoothed)\n self.error = self._init_data(error)\n self.error_smoothed = self._init_data(error_smoothed)\n self.equalization = self._init_data(equalization)\n self.parametric_eq = self._init_data(parametric_eq)\n self.fixed_band_eq = self._init_data(fixed_band_eq)\n self.equalized_raw = self._init_data(equalized_raw)\n self.equalized_smoothed = self._init_data(equalized_smoothed)\n self.target = self._init_data(target)\n self._sort()\n\n def copy(self, name=None):\n return FrequencyResponse(\n name=self.name + '_copy' if name is None else name,\n frequency=self._init_data(self.frequency),\n raw=self._init_data(self.raw),\n error=self._init_data(self.error),\n smoothed=self._init_data(self.smoothed),\n error_smoothed=self._init_data(self.error_smoothed),\n equalization=self._init_data(self.equalization),\n parametric_eq=self._init_data(self.parametric_eq),\n fixed_band_eq=self._init_data(self.fixed_band_eq),\n equalized_raw=self._init_data(self.equalized_raw),\n equalized_smoothed=self._init_data(self.equalized_smoothed),\n target=self._init_data(self.target)\n )\n\n def _init_data(self, data):\n \"\"\"Initializes data to a clean format. If None is passed, an empty array is created. 
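A scalar is broadcast to an array matching the frequency axis. 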
Non-numbers are removed.\"\"\"\n if data is None:\n # None means empty array\n data = []\n elif type(data) == float or type(data) == int:\n # Scalar means all values are that, same shape as frequency\n data = np.ones(self.frequency.shape) * data\n # Replace nans with Nones\n data = [None if x is None or math.isnan(x) else x for x in data]\n # Wrap in Numpy array\n data = np.array(data)\n return data\n\n def _sort(self):\n sorted_inds = self.frequency.argsort()\n self.frequency = self.frequency[sorted_inds]\n for i in range(1, len(self.frequency)):\n if self.frequency[i] == self.frequency[i-1]:\n raise ValueError('Duplicate values found at frequency {}. Remove duplicates manually.'.format(\n self.frequency[i])\n )\n if len(self.raw):\n self.raw = self.raw[sorted_inds]\n if len(self.error):\n self.error = self.error[sorted_inds]\n if len(self.smoothed):\n self.smoothed = self.smoothed[sorted_inds]\n if len(self.error_smoothed):\n self.error_smoothed = self.error_smoothed[sorted_inds]\n if len(self.equalization):\n self.equalization = self.equalization[sorted_inds]\n if len(self.parametric_eq):\n self.parametric_eq = self.parametric_eq[sorted_inds]\n if len(self.fixed_band_eq):\n self.fixed_band_eq = self.fixed_band_eq[sorted_inds]\n if len(self.equalized_raw):\n self.equalized_raw = self.equalized_raw[sorted_inds]\n if len(self.equalized_smoothed):\n self.equalized_smoothed = self.equalized_smoothed[sorted_inds]\n if len(self.target):\n self.target = self.target[sorted_inds]\n\n def reset(self,\n raw=False,\n smoothed=True,\n error=True,\n error_smoothed=True,\n equalization=True,\n fixed_band_eq=True,\n parametric_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=True):\n \"\"\"Resets data.\"\"\"\n if raw:\n self.raw = self._init_data(None)\n if smoothed:\n self.smoothed = self._init_data(None)\n if error:\n self.error = self._init_data(None)\n if error_smoothed:\n self.error_smoothed = self._init_data(None)\n if equalization:\n self.equalization = self._init_data(None)\n if parametric_eq:\n self.parametric_eq = self._init_data(None)\n if fixed_band_eq:\n self.fixed_band_eq = self._init_data(None)\n if equalized_raw:\n self.equalized_raw = self._init_data(None)\n if equalized_smoothed:\n self.equalized_smoothed = self._init_data(None)\n if target:\n self.target = self._init_data(None)\n\n @classmethod\n def read_from_csv(cls, file_path):\n \"\"\"Reads data from CSV file and constructs class instance.\"\"\"\n name = '.'.join(os.path.split(file_path)[1].split('.')[:-1])\n\n # Read file\n f = open(file_path, 'r', encoding='utf-8')\n s = f.read()\n\n # Regex for AutoEq style CSV\n header_pattern = r'frequency(,(raw|smoothed|error|error_smoothed|equalization|parametric_eq|fixed_band_eq|equalized_raw|equalized_smoothed|target))+'\n float_pattern = r'-?\\d+\\.?\\d+'\n data_2_pattern = r'{fl}[ ,;:\\t]+{fl}?'.format(fl=float_pattern)\n data_n_pattern = r'{fl}([ ,;:\\t]+{fl})+?'.format(fl=float_pattern)\n autoeq_pattern = r'^{header}(\\n{data})+\\n*$'.format(header=header_pattern, data=data_n_pattern)\n\n if re.match(autoeq_pattern, s):\n # Known AutoEq CSV format\n df = pd.read_csv(StringIO(s), sep=',', header=0)\n frequency = list(df['frequency'])\n raw = list(df['raw']) if 'raw' in df else None\n smoothed = list(df['smoothed']) if 'smoothed' in df else None\n error = list(df['error']) if 'error' in df else None\n error_smoothed = list(df['error_smoothed']) if 'error_smoothed' in df else None\n equalization = list(df['equalization']) if 'equalization' in df else None\n 
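# (illustrative) a CSV matched by the AutoEq patterns above could look like, with made-up values:\n # frequency,raw,error\n # 20.0,0.42,-1.30\n # 20.32,0.44,-1.28\n # where any subset of the known header columns may be present\n 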
parametric_eq = list(df['parametric_eq']) if 'parametric_eq' in df else None\n fixed_band_eq = list(df['fixed_band_eq']) if 'fixed_band_eq' in df else None\n equalized_raw = list(df['equalized_raw']) if 'equalized_raw' in df else None\n equalized_smoothed = list(df['equalized_smoothed']) if 'equalized_smoothed' in df else None\n target = list(df['target']) if 'target' in df else None\n return cls(\n name=name,\n frequency=frequency,\n raw=raw,\n smoothed=smoothed,\n error=error,\n error_smoothed=error_smoothed,\n equalization=equalization,\n parametric_eq=parametric_eq,\n fixed_band_eq=fixed_band_eq,\n equalized_raw=equalized_raw,\n equalized_smoothed=equalized_smoothed,\n target=target\n )\n else:\n # Unknown format, try to guess\n lines = s.split('\\n')\n frequency = []\n raw = []\n for line in lines:\n if re.match(data_2_pattern, line): # float separator float\n floats = re.findall(float_pattern, line)\n frequency.append(float(floats[0])) # Assume first to be frequency\n raw.append(float(floats[1])) # Assume second to be raw\n # Discard all lines which don't match data pattern\n return cls(name=name, frequency=frequency, raw=raw)\n \n def to_dict(self):\n d = dict()\n if len(self.frequency):\n d['frequency'] = self.frequency.tolist()\n if len(self.raw):\n d['raw'] = [x if x is not None else 'NaN' for x in self.raw]\n if len(self.error):\n d['error'] = [x if x is not None else 'NaN' for x in self.error]\n if len(self.smoothed):\n d['smoothed'] = [x if x is not None else 'NaN' for x in self.smoothed]\n if len(self.error_smoothed):\n d['error_smoothed'] = [x if x is not None else 'NaN' for x in self.error_smoothed]\n if len(self.equalization):\n d['equalization'] = [x if x is not None else 'NaN' for x in self.equalization]\n if len(self.parametric_eq):\n d['parametric_eq'] = [x if x is not None else 'NaN' for x in self.parametric_eq]\n if len(self.fixed_band_eq):\n d['fixed_band_eq'] = [x if x is not None else 'NaN' for x in self.fixed_band_eq]\n if len(self.equalized_raw):\n d['equalized_raw'] = [x if x is not None else 'NaN' for x in self.equalized_raw]\n if len(self.equalized_smoothed):\n d['equalized_smoothed'] = [x if x is not None else 'NaN' for x in self.equalized_smoothed]\n if len(self.target):\n d['target'] = [x if x is not None else 'NaN' for x in self.target]\n return d\n\n def write_to_csv(self, file_path=None):\n \"\"\"Writes data to a file as CSV.\"\"\"\n file_path = os.path.abspath(file_path)\n df = pd.DataFrame(self.to_dict())\n df.to_csv(file_path, header=True, index=False, float_format='%.2f')\n\n def eqapo_graphic_eq(self, normalize=True, f_step=DEFAULT_GRAPHIC_EQ_STEP):\n \"\"\"Generates EqualizerAPO GraphicEQ string from equalization curve.\"\"\"\n fr = FrequencyResponse(name='hack', frequency=self.frequency, raw=self.equalization)\n n = np.ceil(np.log(20000 / 20) / np.log(f_step))\n f = 20 * f_step**np.arange(n)\n f = np.sort(np.unique(f.astype('int')))\n fr.interpolate(f=f)\n if normalize:\n fr.raw -= np.max(fr.raw) + 0.1\n if fr.raw[0] > 0.0:\n # Prevent bass boost below lowest frequency\n fr.raw[0] = 0.0\n\n # Remove trailing zeros\n while np.abs(fr.raw[-1]) < 0.1 and np.abs(fr.raw[-2]) < 0.1: # Last two are near zero\n fr.raw = fr.raw[:-1]\n\n s = '; '.join(['{f} {a:.1f}'.format(f=f, a=a) for f, a in zip(fr.frequency, fr.raw)])\n s = 'GraphicEQ: ' + s\n return s\n\n def write_eqapo_graphic_eq(self, file_path, normalize=True):\n \"\"\"Writes equalization graph to a file as Equalizer APO config.\"\"\"\n file_path = os.path.abspath(file_path)\n s = 
self.eqapo_graphic_eq(normalize=normalize)\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(s)\n return s\n\n @staticmethod\n def optimize_biquad_filters(frequency, target, max_time=5, max_filters=None, fs=DEFAULT_FS, fc=None, q=None):\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n import tensorflow.compat.v1 as tf\n tf.get_logger().setLevel('ERROR')\n tf.disable_v2_behavior()\n\n if fc is not None or q is not None:\n if fc is None:\n raise TypeError('\"fc\" must be given if \"q\" is given.')\n if q is None:\n raise TypeError('\"q\" must be given if \"fc\" is given.')\n if max_filters is not None:\n raise TypeError('\"max_filters\" must not be given when \"fc\" and \"q\" are given.')\n fc = np.array(fc, dtype='float32')\n q = np.array(q, dtype='float32')\n\n parametric = fc is None\n\n # Reset graph to be able to run this again\n tf.reset_default_graph()\n # Sampling frequency\n fs_tf = tf.constant(fs, name='f', dtype='float32')\n\n # Smoothen heavily\n fr_target = FrequencyResponse(name='Filter Initialization', frequency=frequency, raw=target)\n fr_target.smoothen_fractional_octave(window_size=1 / 7, iterations=1000)\n\n # Equalization target\n eq_target = tf.constant(target, name='eq_target', dtype='float32')\n\n n_ls = n_hs = 0\n\n if parametric:\n # Fc and Q not given, parametric equalizer, find initial estimation of peaks and gains\n fr_target_pos = np.clip(fr_target.smoothed, a_min=0.0, a_max=None)\n peak_inds = find_peaks(fr_target_pos)[0]\n fr_target_neg = np.clip(-fr_target.smoothed, a_min=0.0, a_max=None)\n peak_inds = np.concatenate((peak_inds, find_peaks(fr_target_neg)[0]))\n peak_inds.sort()\n peak_inds = peak_inds[np.abs(fr_target.smoothed[peak_inds]) > 0.1]\n\n # Peak center frequencies and gains\n peak_fc = frequency[peak_inds].astype('float32')\n\n if peak_fc[0] > 80:\n # First peak is beyond 80Hz, add peaks to 20Hz and 60Hz\n peak_fc = np.concatenate((np.array([20, 60], dtype='float32'), peak_fc))\n elif peak_fc[0] > 40:\n # First peak is beyond 40Hz, add peak to 20Hz\n peak_fc = np.concatenate((np.array([20], dtype='float32'), peak_fc))\n\n # Gains at peak center frequencies\n interpolator = InterpolatedUnivariateSpline(np.log10(frequency), fr_target.smoothed, k=1)\n peak_g = interpolator(np.log10(peak_fc)).astype('float32')\n\n def remove_small_filters(min_gain):\n # Remove peaks with too little gain\n nonlocal peak_fc, peak_g\n peak_fc = peak_fc[np.abs(peak_g) > min_gain]\n peak_g = peak_g[np.abs(peak_g) > min_gain]\n\n def merge_filters():\n # Merge two filters which have small integral between them\n nonlocal peak_fc, peak_g\n # Form filter pairs, select only filters with equal gain sign\n pair_inds = []\n for j in range(len(peak_fc) - 1):\n if np.sign(peak_g[j]) == np.sign(peak_g[j + 1]):\n pair_inds.append(j)\n\n min_err = None\n min_err_ind = None\n for pair_ind in pair_inds:\n # Interpolate between the two points\n f_0 = peak_fc[pair_ind]\n g_0 = peak_g[pair_ind]\n i_0 = np.argmin(np.abs(frequency - f_0))\n f_1 = peak_fc[pair_ind + 1]\n i_1 = np.argmin(np.abs(frequency - f_1))\n g_1 = peak_g[pair_ind + 1]\n interp = InterpolatedUnivariateSpline(np.log10([f_0, f_1]), [g_0, g_1], k=1)\n line = interp(frequency[i_0:i_1 + 1])\n err = line - fr_target.smoothed[i_0:i_1 + 1]\n err = np.sqrt(np.mean(np.square(err))) # Root mean squared error\n if min_err is None or err < min_err:\n min_err = err\n min_err_ind = pair_ind\n\n if min_err is None:\n # No pairs detected\n return False\n\n # Select smallest error if err < threshold\n if min_err < 0.3:\n # New filter\n c 
= peak_fc[min_err_ind] * np.sqrt(peak_fc[min_err_ind + 1] / peak_fc[min_err_ind])\n c = frequency[np.argmin(np.abs(frequency - c))]\n g = np.mean([peak_g[min_err_ind], peak_g[min_err_ind + 1]])\n # Remove filters\n peak_fc = np.delete(peak_fc, [min_err_ind, min_err_ind + 1])\n peak_g = np.delete(peak_g, [min_err_ind, min_err_ind + 1])\n # Add filter in-between\n peak_fc = np.insert(peak_fc, min_err_ind, c)\n peak_g = np.insert(peak_g, min_err_ind, g)\n return True\n return False # No prominent filter pairs\n\n # Remove insignificant filters\n remove_small_filters(0.1)\n if len(peak_fc) == 0:\n # All filters were insignificant, exit\n return np.zeros(frequency.shape), 0.0, np.array([]), np.array([]), np.array([])\n\n # Limit filter number to max_filters by removing least significant filters and merging close filters\n if max_filters is not None:\n if len(peak_fc) > max_filters:\n # Remove too small filters\n remove_small_filters(0.2)\n\n if len(peak_fc) > max_filters:\n # Try to remove some more\n remove_small_filters(0.33)\n\n # Merge filters if needed\n while merge_filters() and len(peak_fc) > max_filters:\n pass\n\n if len(peak_fc) > max_filters:\n # Remove smallest filters\n sorted_inds = np.flip(np.argsort(np.abs(peak_g)))\n sorted_inds = sorted_inds[:max_filters]\n peak_fc = peak_fc[sorted_inds]\n peak_g = peak_g[sorted_inds]\n\n sorted_inds = np.argsort(peak_fc)\n peak_fc = peak_fc[sorted_inds]\n peak_g = peak_g[sorted_inds]\n\n n = n_pk = len(peak_fc)\n\n # Frequencies\n f = tf.constant(np.repeat(np.expand_dims(frequency, axis=0), n, axis=0), name='f', dtype='float32')\n\n # Center frequencies\n fc = tf.get_variable('fc', initializer=np.expand_dims(np.log10(peak_fc), axis=1), dtype='float32')\n\n # Q\n Q_init = np.ones([n, 1], dtype='float32') * np.ones([n_pk, 1], dtype='float32')\n Q = tf.get_variable('Q', initializer=Q_init, dtype='float32')\n\n else:\n # Fc and Q given, fixed band equalizer\n Q = tf.get_variable(\n 'Q',\n initializer=np.expand_dims(q, axis=1),\n dtype='float32',\n trainable=False\n )\n\n # Gains at peak center frequencies\n interpolator = InterpolatedUnivariateSpline(np.log10(frequency), fr_target.smoothed, k=1)\n peak_g = interpolator(np.log10(fc)).astype('float32')\n\n # Number of filters\n n = n_pk = len(fc)\n\n # Frequencies\n f = tf.constant(np.repeat(np.expand_dims(frequency, axis=0), n, axis=0), name='f', dtype='float32')\n\n # Center frequencies\n fc = tf.get_variable(\n 'fc',\n initializer=np.expand_dims(np.log10(fc), axis=1),\n dtype='float32',\n trainable=False\n )\n\n # Gain\n gain = tf.get_variable('gain', initializer=np.expand_dims(peak_g, axis=1), dtype='float32')\n\n # Filter design\n\n # Low shelf filter\n # This is not used at the moment but is kept for future\n A = 10 ** (gain[:n_ls, :] / 40)\n w0 = 2 * np.pi * tf.pow(10.0, fc[:n_ls, :]) / fs_tf\n alpha = tf.sin(w0) / (2 * Q[:n_ls, :])\n\n a0_ls = ((A + 1) + (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha)\n a1_ls = (-(-2 * ((A - 1) + (A + 1) * tf.cos(w0))) / a0_ls)\n a2_ls = (-((A + 1) + (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha) / a0_ls)\n\n b0_ls = ((A * ((A + 1) - (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha)) / a0_ls)\n b1_ls = ((2 * A * ((A - 1) - (A + 1) * tf.cos(w0))) / a0_ls)\n b2_ls = ((A * ((A + 1) - (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha)) / a0_ls)\n\n # Peak filter\n A = 10 ** (gain[n_ls:n_ls+n_pk, :] / 40)\n w0 = 2 * np.pi * tf.pow(10.0, fc[n_ls:n_ls+n_pk, :]) / fs_tf\n alpha = tf.sin(w0) / (2 * Q[n_ls:n_ls+n_pk, :])\n\n a0_pk = (1 + alpha / A)\n a1_pk = -(-2 * 
tf.cos(w0)) / a0_pk\n a2_pk = -(1 - alpha / A) / a0_pk\n\n b0_pk = (1 + alpha * A) / a0_pk\n b1_pk = (-2 * tf.cos(w0)) / a0_pk\n b2_pk = (1 - alpha * A) / a0_pk\n\n # High shelf filter\n # This is not used at the moment but is kept for future\n A = 10 ** (gain[n_ls+n_pk:, :] / 40)\n w0 = 2 * np.pi * tf.pow(10.0, fc[n_ls+n_pk:, :]) / fs_tf\n alpha = tf.sin(w0) / (2 * Q[n_ls+n_pk:, :])\n\n a0_hs = (A + 1) - (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha\n a1_hs = -(2 * ((A - 1) - (A + 1) * tf.cos(w0))) / a0_hs\n a2_hs = -((A + 1) - (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha) / a0_hs\n\n b0_hs = (A * ((A + 1) + (A - 1) * tf.cos(w0) + 2 * tf.sqrt(A) * alpha)) / a0_hs\n b1_hs = (-2 * A * ((A - 1) + (A + 1) * tf.cos(w0))) / a0_hs\n b2_hs = (A * ((A + 1) + (A - 1) * tf.cos(w0) - 2 * tf.sqrt(A) * alpha)) / a0_hs\n\n # Concatenate all\n a0 = tf.concat([a0_ls, a0_pk, a0_hs], axis=0)\n a1 = tf.concat([a1_ls, a1_pk, a1_hs], axis=0)\n a2 = tf.concat([a2_ls, a2_pk, a2_hs], axis=0)\n b0 = tf.concat([b0_ls, b0_pk, b0_hs], axis=0)\n b1 = tf.concat([b1_ls, b1_pk, b1_hs], axis=0)\n b2 = tf.concat([b2_ls, b2_pk, b2_hs], axis=0)\n\n w = 2 * np.pi * f / fs_tf\n phi = 4 * tf.sin(w / 2) ** 2\n\n a0 = 1.0\n a1 *= -1\n a2 *= -1\n\n # Equalizer frequency response\n eq_op = 10 * tf.log(\n (b0 + b1 + b2) ** 2 + (b0 * b2 * phi - (b1 * (b0 + b2) + 4 * b0 * b2)) * phi\n ) / tf.log(10.0) - 10 * tf.log(\n (a0 + a1 + a2) ** 2 + (a0 * a2 * phi - (a1 * (a0 + a2) + 4 * a0 * a2)) * phi\n ) / tf.log(10.0)\n eq_op = tf.reduce_sum(eq_op, axis=0)\n\n # RMSE as loss\n loss = tf.reduce_mean(tf.square(eq_op - eq_target))\n learning_rate_value = 0.1\n decay = 0.9995\n learning_rate = tf.placeholder('float32', shape=(), name='learning_rate')\n train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\n # Optimization loop\n min_loss = None\n threshold = 0.01\n momentum = 100\n bad_steps = 0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n t = time()\n while time() - t < max_time:\n step_loss, _ = sess.run([loss, train_step], feed_dict={learning_rate: learning_rate_value})\n if min_loss is None or step_loss < min_loss:\n # Improvement, update model\n _eq, _fc, _Q, _gain = sess.run([eq_op, fc, Q, gain])\n _fc = 10**_fc\n\n if min_loss is None or min_loss - step_loss > threshold:\n # Loss improved\n min_loss = step_loss\n bad_steps = 0\n else:\n # No improvement, increment bad step counter\n bad_steps += 1\n if bad_steps > momentum:\n # Bad steps exceed maximum number of bad steps, break\n break\n learning_rate_value = learning_rate_value * decay\n\n rmse = np.sqrt(min_loss) # RMSE\n\n # Fold center frequencies back to normal\n _fc = np.abs(np.round(_fc / fs) * fs - _fc)\n\n # Squeeze to rank-1 arrays\n _fc = np.squeeze(_fc)\n _Q = np.squeeze(_Q)\n _gain = np.squeeze(_gain)\n\n if parametric:\n # Filter selection slice\n sl = np.logical_and(np.abs(_gain) > 0.1, _fc > 10)\n _fc = _fc[sl]\n _Q = np.abs(_Q[sl])\n _gain = _gain[sl]\n\n # Sort filters by center frequency\n sorted_inds = np.argsort(_fc)\n _fc = _fc[sorted_inds]\n _Q = _Q[sorted_inds]\n _gain = _gain[sorted_inds]\n\n # Expand dimensionality for biquad\n _fc = np.expand_dims(_fc, axis=1)\n _Q = np.expand_dims(np.abs(_Q), axis=1)\n _gain = np.expand_dims(_gain, axis=1)\n # Re-compute eq\n a0, a1, a2, b0, b1, b2 = biquad.peaking(_fc, _Q, _gain, fs=fs)\n frequency = np.repeat(np.expand_dims(frequency, axis=0), len(_fc), axis=0)\n _eq = np.sum(biquad.digital_coeffs(frequency, fs, a0, a1, a2, b0, b1, b2), axis=0)\n\n coeffs_a = 
np.hstack((np.tile(a0, a1.shape), a1, a2))\n coeffs_b = np.hstack((b0, b1, b2))\n return _eq, rmse, np.squeeze(_fc, axis=1), np.squeeze(_Q, axis=1), np.squeeze(_gain, axis=1), coeffs_a, coeffs_b\n\n def optimize_parametric_eq(self, max_filters=None, fs=DEFAULT_FS):\n \"\"\"Fits multiple biquad filters to equalization curve. If max_filters is a list with more than one element, one\n optimization run will be run for each element. Each optimization run will continue from the previous. Each\n optimization run's results must be combined with the results of all previous runs but can be used independently of\n the following runs' results. If max_filters is [5, 5, 5] the first 5, 10 and 15 filters can be used\n independently.\n\n Args:\n max_filters: List of maximum number of filters available for each filter group optimization.\n fs: Sampling frequency\n\n Returns:\n - **filters:** Numpy array of filters where each row contains one filter fc, Q and gain\n - **n_produced:** Actual number of filters produced for each filter group. Calling with [5, 5] max_filters\n might actually produce [4, 5] filters meaning that first 4 filters can be used\n independently.\n - **max_gains:** Maximum gain value of the equalizer frequency response after each filter group\n optimization. When using a sub-set of filters independently the actual max gain of that\n sub-set's frequency response must be applied as a negative digital preamp to avoid\n clipping.\n \"\"\"\n if not len(self.equalization):\n raise ValueError('Equalization has not been done yet.')\n\n if type(max_filters) != list:\n max_filters = [max_filters]\n\n self.parametric_eq = np.zeros(self.frequency.shape)\n fc = Q = gain = np.array([])\n coeffs_a = coeffs_b = np.empty((0, 3))\n n_produced = []\n max_gains = []\n for n in max_filters:\n _eq, rmse, _fc, _Q, _gain, _coeffs_a, _coeffs_b = self.optimize_biquad_filters(\n frequency=self.frequency,\n target=self.equalization - self.parametric_eq,\n max_filters=n,\n fs=fs\n )\n n_produced.append(len(_fc))\n # print('RMSE: {:.2f}dB'.format(rmse))\n self.parametric_eq += _eq\n max_gains.append(np.max(self.parametric_eq))\n fc = np.concatenate((fc, _fc))\n Q = np.concatenate((Q, _Q))\n gain = np.concatenate((gain, _gain))\n coeffs_a = np.vstack((coeffs_a, _coeffs_a))\n coeffs_b = np.vstack((coeffs_b, _coeffs_b))\n\n filters = np.transpose(np.vstack([fc, Q, gain]))\n return filters, n_produced, max_gains\n\n def optimize_fixed_band_eq(self, fc=None, q=None, fs=DEFAULT_FS):\n \"\"\"Fits multiple fixed Fc and Q biquad filters to equalization curve.\n\n Args:\n fc: List of center frequencies for the filters\n q: List of Q values for the filters\n fs: Sampling frequency\n\n Returns:\n - **filters:** Numpy array of filters where each row contains one filter fc, Q and gain\n - **n_produced:** Number of filters. 
Equals the length of the inputs.\n - **max_gains:** Maximum gain value of the equalizer frequency response.\n \"\"\"\n eq, rmse, fc, Q, gain, coeffs_a, coeffs_b = self.optimize_biquad_filters(\n frequency=self.frequency,\n target=self.equalization,\n fc=fc,\n q=q,\n fs=fs\n )\n self.fixed_band_eq = eq\n filters = np.transpose(np.vstack([fc, Q, gain]))\n return filters, len(fc), np.max(self.fixed_band_eq)\n\n @staticmethod\n def write_eqapo_parametric_eq(file_path, filters):\n \"\"\"Writes EqualizerAPO Parametric eq settings to a file.\"\"\"\n file_path = os.path.abspath(file_path)\n\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(['Filter {i}: ON {type} Fc {fc:.0f} Hz Gain {gain:.1f} dB Q {Q:.2f}'.format(\n i=i+1,\n type='PK',\n fc=filters[i, 0],\n Q=filters[i, 1],\n gain=filters[i, 2]\n ) for i in range(len(filters))]))\n\n @staticmethod\n def _split_path(path):\n \"\"\"Splits file system path into components.\"\"\"\n folders = []\n while 1:\n path, folder = os.path.split(path)\n\n if folder != \"\":\n folders.append(folder)\n else:\n if path != \"\":\n folders.append(path)\n\n break\n\n folders.reverse()\n return folders\n\n def minimum_phase_impulse_response(self, fs=DEFAULT_FS, f_res=DEFAULT_F_RES, normalize=True):\n \"\"\"Generates minimum phase impulse response\n\n Inspired by:\n https://sourceforge.net/p/equalizerapo/code/HEAD/tree/tags/1.2/filters/GraphicEQFilter.cpp#l45\n\n Args:\n fs: Sampling frequency in Hz\n f_res: Frequency resolution as sampling interval. 20 would result in sampling at 0 Hz, 20 Hz, 40 Hz, ...\n normalize: Normalize gain to -0.5 dB\n\n Returns:\n Minimum phase impulse response\n \"\"\"\n # Double frequency resolution because it will be halved when converting linear phase IR to minimum phase\n f_res /= 2\n # Interpolate to even sample interval\n fr = FrequencyResponse(name='fr_data', frequency=self.frequency.copy(), raw=self.equalization.copy())\n # Save gain at lowest available frequency\n f_min = np.max([fr.frequency[0], f_res])\n interpolator = InterpolatedUnivariateSpline(np.log10(fr.frequency), fr.raw, k=1)\n gain_f_min = interpolator(np.log10(f_min))\n # Filter length, optimized for FFT speed\n n = round(fs // 2 / f_res)\n n = next_fast_len(n)\n f = np.linspace(0.0, fs // 2, n)\n # Run interpolation\n fr.interpolate(f, pol_order=1)\n # Set gain for all frequencies below original minimum frequency to match gain at the original minimum frequency\n fr.raw[fr.frequency <= f_min] = gain_f_min\n if normalize:\n # Reduce by max gain to avoid clipping with 1 dB of headroom\n fr.raw -= np.max(fr.raw)\n fr.raw -= 0.5\n # Minimum phase transformation by scipy's homomorphic method halves dB gain\n fr.raw *= 2\n # Convert amplitude to linear scale\n fr.raw = 10**(fr.raw / 20)\n # Zero gain at Nyquist frequency\n fr.raw[-1] = 0.0\n # Calculate response\n ir = firwin2(len(fr.frequency)*2, fr.frequency, fr.raw, fs=fs)\n # Convert to minimum phase\n ir = minimum_phase(ir, n_fft=len(ir))\n return ir\n\n def linear_phase_impulse_response(self, fs=DEFAULT_FS, f_res=DEFAULT_F_RES, normalize=True):\n \"\"\"Generates impulse response implementation of equalization filter.\"\"\"\n # Interpolate to even sample interval\n fr = FrequencyResponse(name='fr_data', frequency=self.frequency, raw=self.equalization)\n # Save gain at lowest available frequency\n f_min = np.max([fr.frequency[0], f_res])\n interpolator = InterpolatedUnivariateSpline(np.log10(fr.frequency), fr.raw, k=1)\n gain_f_min = interpolator(np.log10(f_min))\n # Run interpolation\n 
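# (illustrative) assuming the defaults fs=48000 and f_res=10, the grid built below is\n # np.arange(0.0, 24000, 10) -> 0, 10, ..., 23990 Hz (2400 points), from which firwin2\n # computes the linear-phase FIR taps further down\n 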
fr.interpolate(np.arange(0.0, fs // 2, f_res), pol_order=1)\n # Set gain for all frequencies below original minimum frequency to match gain at the original minimum frequency\n fr.raw[fr.frequency <= f_min] = gain_f_min\n if normalize:\n # Reduce by max gain to avoid clipping with 1 dB of headroom\n fr.raw -= np.max(fr.raw)\n fr.raw -= 0.5\n # Convert amplitude to linear scale\n fr.raw = 10**(fr.raw / 20)\n # Calculate response\n fr.frequency = np.append(fr.frequency, fs // 2)\n fr.raw = np.append(fr.raw, 0.0)\n ir = firwin2(len(fr.frequency)*2, fr.frequency, fr.raw, fs=fs)\n return ir\n\n def write_readme(self, file_path, max_filters=None, max_gains=None):\n \"\"\"Writes README.md with picture and Equalizer APO settings.\"\"\"\n file_path = os.path.abspath(file_path)\n dir_path = os.path.dirname(file_path)\n model = self.name\n\n # Write model\n s = '# {}\\n'.format(model)\n s += 'See [usage instructions](https://github.com/jaakkopasanen/AutoEq#usage) for more options and ' \\\n 'info.\\n'\n\n # Add parametric EQ settings\n parametric_eq_path = os.path.join(dir_path, model + ' ParametricEQ.txt')\n if os.path.isfile(parametric_eq_path) and self.parametric_eq is not None and len(self.parametric_eq):\n max_gains = [x + 0.5 for x in max_gains]\n\n # Read Parametric eq\n with open(parametric_eq_path, 'r', encoding='utf-8') as f:\n parametric_eq_str = f.read().strip()\n\n # Filters as Markdown table\n filters = []\n for line in parametric_eq_str.split('\\n'):\n if line == '':\n continue\n filter_type = line[line.index('ON')+3:line.index('Fc')-1]\n if filter_type == 'PK':\n filter_type = 'Peaking'\n if filter_type == 'LS':\n filter_type = 'Low Shelf'\n if filter_type == 'HS':\n filter_type = 'High Shelf'\n fc = line[line.index('Fc')+3:line.index('Gain')-1]\n gain = line[line.index('Gain')+5:line.index('Q')-1]\n q = line[line.index('Q')+2:]\n filters.append([filter_type, fc, q, gain])\n filters_table_str = tabulate(\n filters,\n headers=['Type', 'Fc', 'Q', 'Gain'],\n tablefmt='orgtbl'\n ).replace('+', '|').replace('|-', '|:')\n\n max_filters_str = ''\n if type(max_filters) == list and len(max_filters) > 1:\n n = [0]\n for x in max_filters:\n n.append(n[-1] + x)\n del n[0]\n if len(max_filters) > 3:\n max_filters_str = ', '.join([str(x) for x in n[:-2]]) + ' or {}'.format(n[-2])\n if len(max_filters) == 3:\n max_filters_str = '{n0} or {n1}'.format(n0=n[0], n1=n[1])\n if len(max_filters) == 2:\n max_filters_str = str(n[0])\n max_filters_str = 'The first {} filters can be used independently.'.format(max_filters_str)\n\n preamp_str = ''\n if type(max_gains) == list and len(max_gains) > 1:\n max_gains = [x + 0.1 for x in max_gains]\n if len(max_gains) > 3:\n _s = 'When using independent subset of filters, apply preamp of {}, respectively.'\n preamp_str = ', '.join(['-{:.1f}dB'.format(x) for x in max_gains[:-2]])\n preamp_str += ' or -{:.1f}dB'.format(max_gains[-2])\n if len(max_gains) == 3:\n _s = 'When using independent subset of filters, apply preamp of {}, respectively.'\n preamp_str = '-{g0:.1f}dB or -{g1:.1f}dB'.format(g0=max_gains[0], g1=max_gains[1])\n if len(max_gains) == 2:\n _s = 'When using independent subset of filters, apply preamp of **{}**.'\n preamp_str = '-{:.1f}dB'.format(max_gains[0])\n preamp_str = _s.format(preamp_str)\n\n s += '''\n ### Parametric EQs\n In case of using parametric equalizer, apply preamp of **-{preamp:.1f}dB** and build filters manually\n with these parameters. 
{max_filters_str}\n {preamp_str}\n\n {filters_table}\n '''.format(\n model=model,\n preamp=max_gains[-1],\n max_filters_str=max_filters_str,\n preamp_str=preamp_str,\n filters_table=filters_table_str\n )\n\n # Add fixed band eq\n fixed_band_eq_path = os.path.join(dir_path, model + ' FixedBandEQ.txt')\n if os.path.isfile(fixed_band_eq_path) and self.fixed_band_eq is not None and len(self.fixed_band_eq):\n preamp = np.min([0.0, float(-np.max(self.fixed_band_eq))]) - 0.5\n\n # Read Parametric eq\n with open(fixed_band_eq_path, 'r', encoding='utf-8') as f:\n fixed_band_eq_str = f.read().strip()\n\n # Filters as Markdown table\n filters = []\n for line in fixed_band_eq_str.split('\\n'):\n if line == '':\n continue\n filter_type = line[line.index('ON') + 3:line.index('Fc') - 1]\n if filter_type == 'PK':\n filter_type = 'Peaking'\n if filter_type == 'LS':\n filter_type = 'Low Shelf'\n if filter_type == 'HS':\n filter_type = 'High Shelf'\n fc = line[line.index('Fc') + 3:line.index('Gain') - 1]\n gain = line[line.index('Gain') + 5:line.index('Q') - 1]\n q = line[line.index('Q') + 2:]\n filters.append([filter_type, fc, q, gain])\n filters_table_str = tabulate(\n filters,\n headers=['Type', 'Fc', 'Q', 'Gain'],\n tablefmt='orgtbl'\n ).replace('+', '|').replace('|-', '|:')\n\n s += '''\n ### Fixed Band EQs\n In case of using fixed band (also called graphic) equalizer, apply preamp of **{preamp:.1f}dB**\n (if available) and set gains manually with these parameters.\n\n {filters_table}\n '''.format(\n model=model,\n preamp=preamp,\n filters_table=filters_table_str\n )\n\n # Write image link\n img_path = os.path.join(dir_path, model + '.png')\n if os.path.isfile(img_path):\n img_url = f'./{os.path.split(img_path)[1]}'\n img_url = urllib.parse.quote(img_url, safe=\"%/:=&?~#+!$,;'@()*[]\")\n s += '''\n ### Graphs\n ![]({})\n '''.format(img_url)\n\n # Write file\n with open(file_path, 'w', encoding='utf-8') as f:\n f.write(re.sub('\\n[ \\t]+', '\\n', s).strip())\n\n @staticmethod\n def generate_frequencies(f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX, f_step=DEFAULT_STEP):\n freq = []\n f = f_min\n while f <= f_max:\n freq.append(f)\n f *= f_step\n return np.array(freq)\n\n def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX):\n \"\"\"Interpolates missing values from previous and next value. 
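Interpolation is performed on a log10 frequency scale. 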
Resets all but raw data.\"\"\"\n # Remove None values\n i = 0\n while i < len(self.raw):\n if self.raw[i] is None:\n self.raw = np.delete(self.raw, i)\n self.frequency = np.delete(self.frequency, i)\n else:\n i += 1\n\n # Interpolation functions\n keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split()\n interpolators = dict()\n log_f = np.log10(self.frequency)\n for key in keys:\n if len(self.__dict__[key]):\n interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order)\n\n if f is None:\n self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step)\n else:\n self.frequency = np.array(f)\n\n # Prevent log10 from exploding by replacing zero frequency with small value\n zero_freq_fix = False\n if self.frequency[0] == 0:\n self.frequency[0] = 0.001\n zero_freq_fix = True\n\n # Run interpolators\n log_f = np.log10(self.frequency)\n for key in keys:\n if len(self.__dict__[key]) and key in interpolators:\n self.__dict__[key] = interpolators[key](log_f)\n\n if zero_freq_fix:\n # Restore zero frequency\n self.frequency[0] = 0\n\n # Everything but the interpolated data is affected by interpolating, reset them\n self.reset(**{key: False for key in keys})\n\n def center(self, frequency=1000):\n \"\"\"Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n \"\"\"\n equal_energy_fr = FrequencyResponse(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())\n equal_energy_fr.interpolate()\n interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)\n if type(frequency) in [list, np.ndarray] and len(frequency) > 1:\n # Use the average of the gain values between the given frequencies as the difference to be subtracted\n diff = np.mean(equal_energy_fr.raw[np.logical_and(\n equal_energy_fr.frequency >= frequency[0],\n equal_energy_fr.frequency <= frequency[1]\n )])\n else:\n if type(frequency) in [list, np.ndarray]:\n # List or array with only one element\n frequency = frequency[0]\n # Use the gain value at the given frequency as the difference to be subtracted\n diff = interpolator(np.log10(frequency))\n\n self.raw -= diff\n if len(self.smoothed):\n self.smoothed -= diff\n if len(self.error):\n self.error += diff\n if len(self.error_smoothed):\n self.error_smoothed += diff\n\n # Everything but raw, smoothed, errors and target is affected by centering, reset them\n self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False)\n\n return -diff\n\n def _tilt(self, tilt=DEFAULT_TILT):\n \"\"\"Creates a tilt for equalization.\n\n Args:\n tilt: Slope steepness in dB/octave\n\n Returns:\n Tilted data\n \"\"\"\n # Center in logarithmic scale\n c = DEFAULT_F_MIN * np.sqrt(DEFAULT_F_MAX / DEFAULT_F_MIN)\n # N octaves above center\n n_oct = np.log2(self.frequency / c)\n return n_oct * tilt\n\n def create_target(self,\n bass_boost_gain=DEFAULT_BASS_BOOST_GAIN,\n bass_boost_fc=DEFAULT_BASS_BOOST_FC,\n bass_boost_q=DEFAULT_BASS_BOOST_Q,\n tilt=None):\n \"\"\"Creates target curve with bass boost as described by harman target response.\n\n Args:\n bass_boost_gain: Bass boost amount in dB\n bass_boost_fc: Bass boost low shelf center frequency\n bass_boost_q: Bass boost low shelf quality\n tilt: Frequency response tilt (slope) in dB per octave, positive values make it brighter\n\n 
Returns:\n Target for equalization\n \"\"\"\n bass_boost = biquad.digital_coeffs(\n self.frequency,\n DEFAULT_FS,\n *biquad.low_shelf(bass_boost_fc, bass_boost_q, bass_boost_gain, DEFAULT_FS)\n )\n if tilt is not None:\n tilt = self._tilt(tilt=tilt)\n else:\n tilt = np.zeros(len(self.frequency))\n return bass_boost + tilt\n\n def compensate(self,\n compensation,\n bass_boost_gain=DEFAULT_BASS_BOOST_GAIN,\n bass_boost_fc=DEFAULT_BASS_BOOST_FC,\n bass_boost_q=DEFAULT_BASS_BOOST_Q,\n tilt=None,\n sound_signature=None,\n min_mean_error=False):\n \"\"\"Sets target and error curves.\"\"\"\n # Copy and center compensation data\n compensation = FrequencyResponse(name='compensation', frequency=compensation.frequency, raw=compensation.raw)\n compensation.center()\n\n # Set target\n self.target = compensation.raw + self.create_target(\n bass_boost_gain=bass_boost_gain,\n bass_boost_fc=bass_boost_fc,\n bass_boost_q=bass_boost_q,\n tilt=tilt\n )\n if sound_signature is not None:\n # Sound signature given, add it to the target curve\n if not np.all(sound_signature.frequency == self.frequency):\n # Interpolate sound signature to match self on the frequency axis\n sound_signature.interpolate(self.frequency)\n self.target += sound_signature.raw\n\n # Set error\n self.error = self.raw - self.target\n if min_mean_error:\n # Shift error by its mean in the range 100 Hz to 10 kHz\n delta = np.mean(self.error[np.logical_and(self.frequency >= 100, self.frequency <= 10000)])\n self.error -= delta\n self.target += delta\n\n # Smoothed error and equalization results are affected by compensation, reset them\n self.reset(\n raw=False,\n smoothed=False,\n error=False,\n error_smoothed=True,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=False\n )\n\n def _window_size(self, octaves):\n \"\"\"Calculates moving average window size in indices from octaves.\"\"\"\n # Octaves to coefficient\n k = 2 ** octaves\n # Calculate average step size in frequencies\n steps = []\n for i in range(1, len(self.frequency)):\n steps.append(self.frequency[i] / self.frequency[i - 1])\n step_size = sum(steps) / len(steps)\n # Calculate window size in indices\n # step_size^x = k --> x = ...\n window_size = math.log(k) / math.log(step_size)\n # Round to odd integer to be usable as a Savitzky-Golay window length\n window_size = round(window_size)\n if not window_size % 2:\n window_size += 1\n return window_size\n\n def _sigmoid(self, f_lower, f_upper, a_normal=0.0, a_treble=1.0):\n f_center = np.sqrt(f_upper / f_lower) * f_lower\n half_range = np.log10(f_upper) - np.log10(f_center)\n f_center = np.log10(f_center)\n a = expit((np.log10(self.frequency) - f_center) / (half_range / 4))\n a = a * -(a_normal - a_treble) + a_normal\n return a\n\n def _smoothen_fractional_octave(self,\n data,\n window_size=DEFAULT_SMOOTHING_WINDOW_SIZE,\n iterations=DEFAULT_SMOOTHING_ITERATIONS,\n treble_window_size=None,\n treble_iterations=None,\n treble_f_lower=DEFAULT_TREBLE_SMOOTHING_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_SMOOTHING_F_UPPER):\n \"\"\"Smooths data.\n\n Args:\n window_size: Filter window size in octaves.\n iterations: Number of iterations to run the filter. Each new iteration is using output of previous one.\n treble_window_size: Filter window size for high frequencies.\n treble_iterations: Number of iterations for treble filter.\n treble_f_lower: Lower boundary of transition frequency region. 
In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n treble_f_upper: Upper boundary of transition frequency region. In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n \"\"\"\n if None in self.frequency or None in data:\n # Must not contain None values\n raise ValueError('None values present, cannot smoothen!')\n\n # Normal filter\n y_normal = data\n with warnings.catch_warnings():\n # Savgol filter uses array indexing which is not future proof, ignoring the warning and trusting that this\n # will be fixed in the future release\n warnings.simplefilter(\"ignore\")\n for i in range(iterations):\n y_normal = savgol_filter(y_normal, self._window_size(window_size), 2)\n\n # Treble filter\n y_treble = data\n for _ in range(treble_iterations):\n y_treble = savgol_filter(y_treble, self._window_size(treble_window_size), 2)\n\n # Transition weighted with sigmoid\n k_treble = self._sigmoid(treble_f_lower, treble_f_upper)\n k_normal = k_treble * -1 + 1\n return y_normal * k_normal + y_treble * k_treble\n\n def smoothen_fractional_octave(self,\n window_size=DEFAULT_SMOOTHING_WINDOW_SIZE,\n iterations=DEFAULT_SMOOTHING_ITERATIONS,\n treble_window_size=DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE,\n treble_iterations=DEFAULT_TREBLE_SMOOTHING_ITERATIONS,\n treble_f_lower=DEFAULT_TREBLE_SMOOTHING_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_SMOOTHING_F_UPPER):\n \"\"\"Smooths data.\n\n Args:\n window_size: Filter window size in octaves.\n iterations: Number of iterations to run the filter. Each new iteration is using output of previous one.\n treble_window_size: Filter window size for high frequencies.\n treble_iterations: Number of iterations for treble filter.\n treble_f_lower: Lower boundary of transition frequency region. In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n treble_f_upper: Upper boundary of transition frequency region. 
In the transition region normal filter is \\\n switched to treble filter with sigmoid weighting function.\n \"\"\"\n if treble_f_upper <= treble_f_lower:\n raise ValueError('Upper transition boundary must be greater than lower boundary')\n\n # Smoothen raw data\n self.smoothed = self._smoothen_fractional_octave(\n self.raw,\n window_size=window_size,\n iterations=iterations,\n treble_window_size=treble_window_size,\n treble_iterations=treble_iterations,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper\n )\n\n if len(self.error):\n # Smoothen error data\n self.error_smoothed = self._smoothen_fractional_octave(\n self.error,\n window_size=window_size,\n iterations=iterations,\n treble_window_size=treble_window_size,\n treble_iterations=treble_iterations,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper\n )\n\n # Equalization is affected by smoothing, reset equalization results\n self.reset(\n raw=False,\n smoothed=False,\n error=False,\n error_smoothed=False,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=False\n )\n\n def smoothen_heavy_light(self):\n \"\"\"Smoothens data by combining light and heavy smoothing and taking maximum.\n\n Returns:\n None\n \"\"\"\n light = self.copy()\n light.name = 'Light'\n light.smoothen_fractional_octave(\n window_size=1 / 6,\n iterations=1,\n treble_f_lower=100,\n treble_f_upper=10000,\n treble_window_size=1 / 3,\n treble_iterations=1\n )\n\n heavy = self.copy()\n heavy.name = 'Heavy'\n heavy.smoothen_fractional_octave(\n window_size=1 / 3,\n iterations=1,\n treble_f_lower=1000,\n treble_f_upper=6000,\n treble_window_size=1.3,\n treble_iterations=1\n )\n\n combination = self.copy()\n combination.name = 'Combination'\n combination.error = np.max(np.vstack([light.error_smoothed, heavy.error_smoothed]), axis=0)\n combination.smoothen_fractional_octave(\n window_size=1 / 3,\n iterations=1,\n treble_f_lower=100,\n treble_f_upper=10000,\n treble_window_size=1 / 3,\n treble_iterations=1\n )\n\n self.smoothed = combination.smoothed.copy()\n self.error_smoothed = combination.error_smoothed.copy()\n\n # Equalization is affected by smoothing, reset equalization results\n self.reset(\n raw=False,\n smoothed=False,\n error=False,\n error_smoothed=False,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized_raw=True,\n equalized_smoothed=True,\n target=False\n )\n\n def equalize(self,\n max_gain=DEFAULT_MAX_GAIN,\n smoothen=True,\n treble_f_lower=DEFAULT_TREBLE_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_F_UPPER,\n treble_max_gain=DEFAULT_TREBLE_MAX_GAIN,\n treble_gain_k=DEFAULT_TREBLE_GAIN_K):\n \"\"\"Creates equalization curve and equalized curve.\n\n Args:\n max_gain: Maximum positive gain in dB\n smoothen: Smooth kinks caused by clipping gain to max gain?\n treble_f_lower: Lower frequency boundary for transition region between normal parameters and treble parameters\n treble_f_upper: Upper frequency boundary for transition region between normal parameters and treble parameters\n treble_max_gain: Maximum positive gain in dB in treble region\n treble_gain_k: Coefficient for treble gain, positive and negative. Useful for disabling or reducing \\\n equalization power in treble region. Defaults to 1.0 (not limited).\n \"\"\"\n self.equalization = []\n self.equalized_raw = []\n\n if len(self.error_smoothed):\n error = self.error_smoothed\n elif len(self.error):\n error = self.error\n else:\n raise ValueError('Error data is missing. 
Call FrequencyResponse.compensate().')\n\n if None in error or None in self.equalization or None in self.equalized_raw:\n # Must not contain None values\n raise ValueError('None values detected during equalization, interpolating data with default parameters.')\n\n # Invert with max gain clipping\n previous_clipped = False\n kink_inds = []\n\n # Max gain at each frequency\n max_gain = self._sigmoid(treble_f_lower, treble_f_upper, a_normal=max_gain, a_treble=treble_max_gain)\n gain_k = self._sigmoid(treble_f_lower, treble_f_upper, a_normal=1.0, a_treble=treble_gain_k)\n for i in range(len(error)):\n gain = - error[i] * gain_k[i]\n clipped = gain > max_gain[i]\n if previous_clipped != clipped:\n kink_inds.append(i)\n previous_clipped = clipped\n if clipped:\n gain = max_gain[i]\n self.equalization.append(gain)\n\n if len(kink_inds) and kink_inds[0] == 0:\n del kink_inds[0]\n\n if smoothen:\n # Smooth out kinks\n window_size = self._window_size(1 / 12)\n doomed_inds = set()\n for i in kink_inds:\n start = i - min(i, (window_size - 1) // 2)\n end = i + 1 + min(len(self.equalization) - i - 1, (window_size - 1) // 2)\n doomed_inds.update(range(start, end))\n doomed_inds = sorted(doomed_inds)\n\n for i in range(1, 3):\n if len(self.frequency) - i in doomed_inds:\n del doomed_inds[doomed_inds.index(len(self.frequency) - i)]\n\n f = np.array([x for i, x in enumerate(self.frequency) if i not in doomed_inds])\n e = np.array([x for i, x in enumerate(self.equalization) if i not in doomed_inds])\n interpolator = InterpolatedUnivariateSpline(np.log10(f), e, k=2)\n self.equalization = interpolator(np.log10(self.frequency))\n else:\n self.equalization = np.array(self.equalization)\n\n # Equalized\n self.equalized_raw = self.raw + self.equalization\n if len(self.smoothed):\n self.equalized_smoothed = self.smoothed + self.equalization\n\n @staticmethod\n def kwarg_defaults(kwargs, **defaults):\n if kwargs is None:\n kwargs = {}\n else:\n kwargs = {key: val for key, val in kwargs.items()}\n for key, val in defaults.items():\n if key not in kwargs:\n kwargs[key] = val\n return kwargs\n\n def plot_graph(self,\n fig=None,\n ax=None,\n show=True,\n raw=True,\n error=True,\n smoothed=True,\n error_smoothed=True,\n equalization=True,\n parametric_eq=True,\n fixed_band_eq=True,\n equalized=True,\n target=True,\n file_path=None,\n f_min=DEFAULT_F_MIN,\n f_max=DEFAULT_F_MAX,\n a_min=None,\n a_max=None,\n color='black',\n raw_plot_kwargs=None,\n smoothed_plot_kwargs=None,\n error_plot_kwargs=None,\n error_smoothed_plot_kwargs=None,\n equalization_plot_kwargs=None,\n parametric_eq_plot_kwargs=None,\n fixed_band_eq_plot_kwargs=None,\n equalized_plot_kwargs=None,\n target_plot_kwargs=None,\n close=False):\n \"\"\"Plots frequency response graph.\"\"\"\n if fig is None:\n fig, ax = plt.subplots()\n fig.set_size_inches(12, 8)\n if not len(self.frequency):\n raise ValueError('\\'frequency\\' has no data!')\n\n if target and len(self.target):\n ax.plot(\n self.frequency, self.target,\n **self.kwarg_defaults(target_plot_kwargs, label='Target', linewidth=5, color='lightblue')\n )\n\n if smoothed and len(self.smoothed):\n ax.plot(\n self.frequency, self.smoothed,\n **self.kwarg_defaults(smoothed_plot_kwargs, label='Raw Smoothed', linewidth=5, color='lightgrey')\n )\n\n if error_smoothed and len(self.error_smoothed):\n ax.plot(\n self.frequency, self.error_smoothed,\n **self.kwarg_defaults(error_smoothed_plot_kwargs, label='Error Smoothed', linewidth=5, color='pink')\n )\n\n if raw and len(self.raw):\n ax.plot(\n self.frequency, 
self.raw,\n **self.kwarg_defaults(raw_plot_kwargs, label='Raw', linewidth=1, color=color)\n )\n\n if error and len(self.error):\n ax.plot(\n self.frequency, self.error,\n **self.kwarg_defaults(error_plot_kwargs, label='Error', linewidth=1, color='red')\n )\n\n if equalization and len(self.equalization):\n ax.plot(\n self.frequency, self.equalization,\n **self.kwarg_defaults(equalization_plot_kwargs, label='Equalization', linewidth=5, color='lightgreen')\n )\n\n if parametric_eq and len(self.parametric_eq):\n ax.plot(\n self.frequency, self.parametric_eq,\n **self.kwarg_defaults(parametric_eq_plot_kwargs, label='Parametric Eq', linewidth=1, color='darkgreen')\n )\n\n if fixed_band_eq and len(self.fixed_band_eq):\n ax.plot(\n self.frequency, self.fixed_band_eq,\n **self.kwarg_defaults(\n fixed_band_eq_plot_kwargs,\n label='Fixed Band Eq', linewidth=1, color='darkgreen', linestyle='--'\n )\n )\n\n if equalized and len(self.equalized_raw):\n ax.plot(\n self.frequency, self.equalized_raw,\n **self.kwarg_defaults(equalized_plot_kwargs, label='Equalized', linewidth=1, color='blue')\n )\n\n ax.set_xlabel('Frequency (Hz)')\n ax.semilogx()\n ax.set_xlim([f_min, f_max])\n ax.set_ylabel('Amplitude (dBr)')\n ax.set_ylim([a_min, a_max])\n ax.set_title(self.name)\n ax.legend(fontsize=8)\n ax.grid(True, which='major')\n ax.grid(True, which='minor')\n ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))\n if file_path is not None:\n file_path = os.path.abspath(file_path)\n fig.savefig(file_path, dpi=120)\n im = Image.open(file_path)\n im = im.convert('P', palette=Image.ADAPTIVE, colors=60)\n im.save(file_path, optimize=True)\n if show:\n plt.show()\n elif close:\n plt.close(fig)\n return fig, ax\n\n def harman_onear_preference_score(self):\n \"\"\"Calculates Harman preference score for over-ear and on-ear headphones.\n\n Returns:\n - score: Preference score\n - std: Standard deviation of error\n - slope: Slope of linear regression of error\n \"\"\"\n fr = self.copy()\n fr.interpolate(HARMAN_ONEAR_PREFERENCE_FREQUENCIES)\n sl = np.logical_and(fr.frequency >= 50, fr.frequency <= 10000)\n x = fr.frequency[sl]\n y = fr.error[sl]\n\n std = np.std(y, ddof=1) # ddof=1 is required to get the exact same numbers as the Excel from Listen Inc gives\n slope, _, _, _, _ = linregress(np.log(x), y)\n score = 114.490443008238 - 12.62 * std - 15.5163857197367 * np.abs(slope)\n\n return score, std, slope\n\n def harman_inear_preference_score(self):\n \"\"\"Calculates Harman preference score for in-ear headphones.\n\n Returns:\n - score: Preference score\n - std: Standard deviation of error\n - slope: Slope of linear regression of error\n - mean: Mean of absolute error\n \"\"\"\n fr = self.copy()\n fr.interpolate(HARMAN_INEAR_PREFENCE_FREQUENCIES)\n sl = np.logical_and(fr.frequency >= 20, fr.frequency <= 10000)\n x = fr.frequency[sl]\n y = fr.error[sl]\n\n std = np.std(y, ddof=1) # ddof=1 is required to get the exact same numbers as the Excel from Listen Inc gives\n slope, _, _, _, _ = linregress(np.log(x), y)\n # Mean of absolute of error centered by 500 Hz\n delta = fr.error[np.where(fr.frequency == 500.0)[0][0]]\n y = fr.error[np.logical_and(fr.frequency >= 40, fr.frequency <= 10000)] - delta\n mean = np.mean(np.abs(y))\n # Final score\n score = 100.0795 - 8.5 * std - 6.796 * np.abs(slope) - 3.475 * mean\n\n return score, std, slope, mean\n\n def process(self,\n compensation=None,\n min_mean_error=False,\n equalize=False,\n parametric_eq=False,\n fixed_band_eq=False,\n fc=None,\n q=None,\n ten_band_eq=None,\n 
max_filters=None,\n bass_boost_gain=None,\n bass_boost_fc=None,\n bass_boost_q=None,\n tilt=None,\n sound_signature=None,\n max_gain=DEFAULT_MAX_GAIN,\n treble_f_lower=DEFAULT_TREBLE_F_LOWER,\n treble_f_upper=DEFAULT_TREBLE_F_UPPER,\n treble_max_gain=DEFAULT_TREBLE_MAX_GAIN,\n treble_gain_k=DEFAULT_TREBLE_GAIN_K,\n fs=DEFAULT_FS):\n \"\"\"Runs processing pipeline with interpolation, centering, compensation and equalization.\n\n Args:\n compensation: Compensation FrequencyResponse. Must be interpolated and centered.\n min_mean_error: Minimize mean error. Normally all curves cross at 1 kHz but this makes it possible to shift\n error curve so that mean between 100 Hz and 10 kHz is at minimum. Target curve is shifted\n accordingly. Useful for avoiding large bias caused by a narrow notch or peak at 1 kHz.\n equalize: Run equalization?\n parametric_eq: Optimize peaking filters for parametric eq?\n fixed_band_eq: Optimize peaking filters for fixed band (graphic) eq?\n fc: List of center frequencies for fixed band eq\n q: List of Q values for fixed band eq\n ten_band_eq: Optimize filters for standard ten band eq?\n max_filters: List of maximum number of peaking filters for each additive filter optimization run.\n bass_boost_gain: Bass boost amount in dB.\n bass_boost_fc: Bass boost low shelf center frequency.\n bass_boost_q: Bass boost low shelf quality.\n tilt: Target frequency response tilt in dB / octave\n sound_signature: Sound signature as FrequencyResponse instance. Raw data will be used.\n max_gain: Maximum positive gain in dB\n treble_f_lower: Lower bound for treble transition region\n treble_f_upper: Upper bound for treble transition region\n treble_max_gain: Maximum gain in treble region\n treble_gain_k: Gain coefficient in treble region\n fs: Sampling frequency\n\n Returns:\n - **peq_filters:** Numpy array of produced parametric eq peaking filters. Each row contains Fc, Q and gain\n - **n_peq_filters:** Number of produced parametric eq peaking filters for each group.\n - **peq_max_gains:** Maximum positive gains in each parametric eq peaking filter group.\n - **fbeq_filters:** Numpy array of produced fixed band peaking filters. 
Each row contains Fc, Q and gain\n - **n_fbeq_filters:** Number of produced fixed band peaking filters.\n - **fbeq_max_gains:** Maximum positive gain for fixed band eq.\n \"\"\"\n if parametric_eq and not equalize:\n raise ValueError('equalize must be True when parametric_eq is True.')\n\n if ten_band_eq:\n # Ten band eq is a shortcut for setting Fc and Q values to standard 10-band equalizer filters parameters\n fixed_band_eq = True\n fc = np.array([31.25, 62.5, 125, 250, 500, 1000, 2000, 4000, 8000, 16000], dtype='float32')\n q = np.ones(10, dtype='float32') * np.sqrt(2)\n\n if fixed_band_eq:\n if fc is None or q is None:\n raise ValueError('\"fc\" and \"q\" must be given when \"fixed_band_eq\" is given.')\n # Center frequencies are given but Q is a single value\n # Repeat Q to length of Fc\n if type(q) in [list, np.ndarray]:\n if len(q) == 1:\n q = np.repeat(q[0], len(fc))\n elif len(q) != len(fc):\n raise ValueError('q must have one element or the same number of elements as fc.')\n elif type(q) not in [list, np.ndarray]:\n q = np.repeat(q, len(fc))\n\n if fixed_band_eq and not equalize:\n raise ValueError('equalize must be True when fixed_band_eq or ten_band_eq is True.')\n\n if max_filters is not None and type(max_filters) != list:\n max_filters = [max_filters]\n\n # Interpolate to standard frequency vector\n self.interpolate()\n\n # Center by 1kHz\n self.center()\n\n if compensation is not None:\n # Compensate\n self.compensate(\n compensation,\n bass_boost_gain=bass_boost_gain,\n bass_boost_fc=bass_boost_fc,\n bass_boost_q=bass_boost_q,\n tilt=tilt,\n sound_signature=sound_signature,\n min_mean_error=min_mean_error\n )\n\n # Smooth data\n self.smoothen_heavy_light()\n self.smoothen_fractional_octave(\n window_size=1/3,\n treble_window_size=1.4,\n treble_f_lower=6000,\n treble_f_upper=12000\n )\n\n peq_filters = n_peq_filters = peq_max_gains = fbeq_filters = n_fbeq_filters = nfbeq_max_gains = None\n # Equalize\n if equalize:\n self.equalize(\n max_gain=max_gain,\n smoothen=True,\n treble_f_lower=treble_f_lower,\n treble_f_upper=treble_f_upper,\n treble_max_gain=treble_max_gain,\n treble_gain_k=treble_gain_k\n )\n if parametric_eq:\n # Get the filters\n peq_filters, n_peq_filters, peq_max_gains = self.optimize_parametric_eq(max_filters=max_filters, fs=fs)\n if fixed_band_eq:\n fbeq_filters, n_fbeq_filters, nfbeq_max_gains = self.optimize_fixed_band_eq(fc=fc, q=q, fs=fs)\n\n return peq_filters, n_peq_filters, peq_max_gains, fbeq_filters, n_fbeq_filters, nfbeq_max_gains\n","repo_name":"zettonaender/eqapotographiceq-gui","sub_path":"frequency_response.py","file_name":"frequency_response.py","file_ext":"py","file_size_in_byte":70998,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"21"} {"seq_id":"35919468262","text":"# coding=utf-8\n# Plot the data acquired from the ADSP-CM408F development board, by hjh 2018-7-23\nimport matplotlib.pyplot as plt\nimport nilmd_utils, common_class\nimport time\n\ndataDir = './test_data/' # 408F data directory\n\n\ndef getTotalData(name, period): # get current/voltage data aligned to the voltage phase\n global dataDir\n f = open(dataDir + name)\n line = f.readline()\n n = 0\n device = common_class.Device()\n datas = [] # all data\n irmsList = []\n vrmsList = []\n while line:\n if (len(str(line)) < 10):\n datas.append(float(str(line)))\n else:\n datas.append(float(str(line)[0])) # keep only the count (first digit)\n line = f.readline()\n f.close()\n while (n < period):\n try:\n power = datas[1280 * (n + 1) + 1 + n * 3] # extract the current value (now power)\n device.powerList.append(power)\n tempList = datas[1280 * n + 1 + n * 3: 1280 * (n + 1) + 1 + n * 3] 
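# (illustrative) frame layout assumed by the indexing above: each frame holds 1280\n # interleaved samples (even index = current, odd index = voltage) followed by 3\n # metadata values, the first of which is read as power\n 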
{"seq_id":"35919468262","text":"# coding=utf-8\n# Plot the data captured from the ADSP-CM408F development board, by hjh 2018-7-23\nimport matplotlib.pyplot as plt\nimport nilmd_utils, common_class\nimport time\n\ndataDir = './test_data/' # 408F data directory\n\n\ndef getTotalData(name, period): # get the current and voltage data after aligning the voltage phase\n global dataDir\n f = open(dataDir + name)\n line = f.readline()\n n = 0\n device = common_class.Device()\n datas = [] # all samples\n irmsList = []\n vrmsList = []\n while line:\n if (len(str(line)) < 10):\n datas.append(float(str(line)))\n else:\n datas.append(float(str(line)[0])) # keep only the count digit\n line = f.readline()\n f.close()\n while (n < period):\n try:\n power = datas[1280 * (n + 1) + 1 + n * 3] # read the current (now actually power)\n device.powerList.append(power)\n tempList = datas[1280 * n + 1 + n * 3: 1280 * (n + 1) + 1 + n * 3] # current/voltage samples of a single cycle\n except Exception as e:\n pass\n\n for i in range(len(tempList)):\n if (i % 2 == 0):\n irmsList.append(tempList[i])\n else:\n vrmsList.append(tempList[i])\n n += 1\n\n size = len(irmsList)\n print('size = ', size)\n allIrms = []\n allVrms = []\n for number in range(int(size / 640)):\n point = nilmd_utils.getZeroPhase(vrmsList, 10, 320, number)\n v = vrmsList[640 * number:640 * (number + 1)]\n i = irmsList[640 * number:640 * (number + 1)]\n allIrms.extend(i[point:point + 320])\n allVrms.extend(v[point:point + 320])\n device.irmsList.extend(allIrms)\n device.vrmsList.extend(allVrms)\n return device\n\n\ndef drawPowerCompareImage(n, *yValues):\n goodValues, badValues = yValues\n xValues = [x for x in range(len(goodValues))]\n plt.figure(figsize=(12, 6), dpi=80)\n # create the first figure\n # plt.figure(1)\n # split the first figure into a grid of blocks and grab the first block\n ax1 = plt.subplot(211)\n # plot in the first sub-area\n plt.plot(xValues, goodValues, color='r')\n # plt.scatter(xValues,yValues,color=\"blue\")\n # plt.scatter(xValues,yValues,marker=\"v\",s=50,color=\"r\")\n # select the second sub-area and plot\n\n xValues = [x for x in range(len(badValues))]\n ax2 = plt.subplot(212)\n plt.plot(xValues, badValues, color='blue')\n ax1.set_title(\"normal_power\")\n ax2.set_title(\"unnormal_power\")\n # plt.tight_layout()\n name = 'compare_12_power_'\n plt.savefig(\"./Image/\" + name + str(time.time()) + \".png\")\n pass\n\n\ndef drawSinglePowerImage(yValues):\n xValues = [x for x in range(len(yValues))]\n plt.figure(figsize=(12, 6), dpi=80)\n # create the first figure\n # plt.figure(1)\n # split the first figure into a grid of blocks and grab the first block\n ax1 = plt.subplot(111)\n # plot in the first sub-area\n plt.plot(xValues, yValues, color='b')\n # plt.scatter(xValues,yValues,color=\"blue\")\n # plt.scatter(xValues,yValues,marker=\"v\",s=50,color=\"r\")\n # select the second sub-area and plot\n ax1.set_title(\"unnormal_power\")\n\n # plt.tight_layout()\n name = 'compare_12_power_'\n plt.savefig(\"./Image/\" + name + str(time.time()) + \".png\")\n\n\n
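# Editor's note: drawCompareImage below estimates a geometric mean with pow(product(samples), 1/n);\n# the raw product easily over/underflows for long power sequences. A log-domain sketch (standard\n# library only; assumes strictly positive samples, as these power readings should be):\n#\n# import math\n# def geometric_mean(xs):\n# return math.exp(math.fsum(math.log(x) for x in xs) / len(xs))\n\n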
def drawCompareImage(): # draw comparison plots\n rightList = ['current_2019-11-07_cache_12_3_right_3.txt', 'current_2019-11-07_cache_12_3_right_4.txt']\n badList = ['current_2019-11-07_cache_12_3_bad_3.txt', 'current_2019-11-07_cache_12_3_bad_4.txt']\n rightList = ['2019-11-18_cache_12_good3.txt', '2019-11-18_cache_12_good2.txt', '2019-11-18_cache_12_good.txt']\n badList = ['2019-11-18_cache_12_bad3.txt', '2019-11-18_cache_12_bad2.txt', '2019-11-18_cache_12_bad.txt']\n goodDeviceList = []\n badDeviceList = []\n period = 40 # 640-point blocks for 16 cycles\n for i in range(len(rightList)):\n goodDeviceList.append(getTotalData(rightList[i], period))\n for i in range(len(badList)):\n badDeviceList.append(getTotalData(badList[i], period))\n # drawSinglePowerImage(badDeviceList[0].powerList)\n for i in range(len(goodDeviceList)):\n goodDatas = goodDeviceList[i].powerList\n badDatas = badDeviceList[i].powerList\n print(nilmd_utils.zuixiaoerchen(goodDatas))\n print(nilmd_utils.zuixiaoerchen(badDatas))\n # drawPowerCompareImage(i, *(goodDatas,badDatas))\n print('std_good: '+ str(nilmd_utils.getSTDValue(goodDatas)))\n print('std_bad: ' + str(nilmd_utils.getSTDValue(badDatas)))\n # the arithmetic mean is affected by voltage fluctuations, so it is not very meaningful\n # simple geometric mean\n\n print('geometric mean (normal):',pow(nilmd_utils.product(*tuple(goodDatas)),1/len(goodDatas)))\n print('geometric mean (abnormal):', pow(nilmd_utils.product(*tuple(badDatas)), 1 / len(badDatas)))\n pass\n if 1:\n return\n rightIrmsList = goodDeviceList[0].irmsList # current\n rightVrmsList = goodDeviceList[0].vrmsList # voltage\n\n badIrmsList = badDeviceList[0].irmsList # current\n badVrmsList = badDeviceList[0].vrmsList # voltage\n size = len(rightIrmsList)\n number = int(640 / 2) # 640 points per figure\n count = size / number # number of figures\n n = 0\n std = False # whether to print the standard deviation\n\n while n < (count):\n yValues = rightIrmsList[number * n:number * (n + 1)]\n if (std):\n size = len(yValues)\n yValues = nilmd_utils.doNormalize(yValues, 1)\n std = nilmd_utils.getSTDValue(yValues)\n print('right_std = ', std)\n yValues = badIrmsList[number * n:number * (n + 1)]\n yValues = nilmd_utils.doNormalize(yValues, 1)\n std = nilmd_utils.getSTDValue(yValues)\n print('bad_std = ', std)\n n += 1\n continue\n xValues = [x for x in range(len(yValues))]\n plt.figure(figsize=(12, 6), dpi=80)\n # create the first figure\n # plt.figure(1)\n # split the first figure into a 2x2 grid of blocks and grab the first block\n ax1 = plt.subplot(221)\n # plot in the first sub-area\n plt.plot(xValues, yValues, color='blue')\n # plt.scatter(xValues,yValues,color=\"blue\")\n # plt.scatter(xValues,yValues,marker=\"v\",s=50,color=\"r\")\n # select the second sub-area and plot\n yValues = rightVrmsList[number * n:number * (n + 1)]\n xValues = [x for x in range(len(yValues))]\n ax2 = plt.subplot(222)\n plt.plot(xValues, yValues, color='blue')\n\n ax3 = plt.subplot(223)\n yValues = badIrmsList[number * n:number * (n + 1)]\n xValues = [x for x in range(len(yValues))]\n plt.plot(xValues, yValues, color='r')\n\n ax4 = plt.subplot(224)\n yValues = badVrmsList[number * n:number * (n + 1)]\n xValues = [x for x in range(len(yValues))]\n plt.plot(xValues, yValues, color='r')\n\n ax1.set_title(\"right_irms\")\n ax2.set_title(\"right_vrms\")\n ax3.set_title(\"bad_irms\")\n ax4.set_title(\"bad_vrms\")\n\n # plt.tight_layout()\n name = 'compare_12_'\n plt.savefig(\"./Image/\" + name + str(n + 1) + \".png\")\n n += 1\n\n\nif __name__ == '__main__':\n drawCompareImage()\n pass\n","repo_name":"JasenWell/NilmdExceptionMonitor","sub_path":"draw_image.py","file_name":"draw_image.py","file_ext":"py","file_size_in_byte":7182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36981251527","text":"from flask_restplus import Namespace, fields\n\nclass BookDto:\n api = Namespace('book', description='book related operations')\n book = api.model('book', {\n 'id': fields.Integer(description='book Identifier'),\n 'name' : fields.String(required=True, description='name'),\n 'author' : fields.String(required=True, description='author'),\n 'ISBN' : fields.String(required=True, description='ISBN'),\n 'year' : fields.Integer(description='year loaned'),\n 'published_date' : fields.DateTime(dt_format='rfc822'),\n 'isactive' : fields.Boolean(description='is active or not, changes when it is deleted'),\n 'created_on' : fields.DateTime(dt_format='rfc822'),\n 'updated_on' : fields.DateTime(dt_format='rfc822')\n })","repo_name":"yorjaggy/python_projects","sub_path":"simple_rest_api/app/main/util/dto.py","file_name":"dto.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8624256060","text":"import pandas as pd \nfrom scipy.stats import chi2_contingency\nfrom scipy.stats import binom_test\n\ndf = pd.read_csv(\"clicks.csv\")\n\nprint (df.head())\n\ndf['is_purchase'] = df.click_day.apply(lambda x: \"Purchase\" if pd.notnull(x) else \"No Purchase\")\n\npurchase_counts = df.groupby(['group', 'is_purchase']).user_id.count().reset_index()\n\nprint (purchase_counts)\n\ncontingency = [[316, 1350],\n\t\t\t [183, 1483],\n\t\t\t [83, 1583]]\n\nchi2_stat, pvalue, dof, expfreq = chi2_contingency(contingency)\nprint (pvalue)\n\nis_significant = True\n\nnum_visits = len(df)\nnum_sales_99 = 1000 / .99\nnum_sales_199 = 1000 / 1.99\nnum_sales_499 
= 1000 / 4.99\n\np_clicks_099 = num_sales_99 / num_visits\np_clicks_199 = num_sales_199 / num_visits\np_clicks_499 = num_sales_499 / num_visits\n\npvalueA = binom_test(316, 1666, p_clicks_099)\npvalueB = binom_test(183, 1666, p_clicks_199)\npvalueC = binom_test(83, 1666, p_clicks_499)","repo_name":"whatupdc/python_4","sub_path":"farmburg.py","file_name":"farmburg.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17202722645","text":"'''\n@author LeslieZhao\n@date 20230620\n'''\nimport os \n\nimport argparse\n \n\nfrom trainer.DiffuserTrainer import DiffuserTrainer\nimport torch.distributed as dist \nfrom utils.utils import setup_seed,get_data_loader,merge_args\nfrom model.config import Params as DiffuserParams\n\n# torch.multiprocessing.set_start_method('spawn')\n\nparser = argparse.ArgumentParser(description=\"Lora\")\n#---------train set-------------------------------------\nparser.add_argument('--isTrain',action=\"store_false\",help='')\nparser.add_argument('--dist',action=\"store_false\",help='')\nparser.add_argument('--apply_begin_it',action=\"store_false\",help='')\nparser.add_argument('--batch_size',default=16,type=int)\nparser.add_argument('--resolution',default=512,type=int)\nparser.add_argument('--seed',default=10,type=int)\nparser.add_argument('--eval',default=1,type=int,help='whether use eval')\nparser.add_argument('--nDataLoaderThread',default=5,type=int,help='Num of loader threads')\nparser.add_argument('--print_interval',default=100,type=int)\nparser.add_argument('--test_interval',default=100,type=int,help='Test and save every [test_interval] epochs')\nparser.add_argument('--save_interval',default=100,type=int,help='save model interval')\nparser.add_argument('--stop_interval',default=20,type=int)\nparser.add_argument('--begin_it',default=0,type=int,help='begin epoch')\nparser.add_argument('--mx_data_length',default=100,type=int,help='max data length')\nparser.add_argument('--max_epoch',default=10000,type=int)\nparser.add_argument('--max_train_steps',default=2000,type=int)\nparser.add_argument('--early_stop',action=\"store_true\",help='')\nparser.add_argument('--train_text_encoder',action=\"store_true\",help='')\n#---------path set--------------------------------------\nparser.add_argument('--checkpoint_path',default='checkpoint',type=str)\nparser.add_argument('--pretrain_path',default=None,type=str)\n\n# ------optimizer set--------------------------------------\nparser.add_argument('--lr',default=0.002,type=float,help=\"Learning rate\")\n\nparser.add_argument(\n '--local_rank',\n type=int,\n default=0,\n help='Local rank passed from distributed launcher'\n)\n\nargs = parser.parse_args()\n\ndef train_net(args):\n train_loader,test_loader,mx_length = get_data_loader(args) \n args.mx_data_length = mx_length \n trainer = DiffuserTrainer(args)\n trainer.train_network(train_loader,test_loader)\n\nif __name__ == \"__main__\":\n \n args = parser.parse_args()\n params = DiffuserParams()\n args = merge_args(args,params)\n if args.dist:\n dist.init_process_group(backend=\"nccl\") # backend='nccl'\n dist.barrier() # synchronize all processes at startup\n args.world_size = dist.get_world_size() # total number of processes\n args.rank = dist.get_rank() # rank of the current process\n \n else:\n args.world_size = 1\n args.rank = 0\n\n setup_seed(args.seed+args.rank)\n print(args)\n\n args.checkpoint_path = os.path.join(args.checkpoint_path,args.name)\n \n print(\"local_rank %d | rank %d | world_size: %d\"%(int(os.environ.get('LOCAL_RANK','0')),args.rank,args.world_size))\n if args.rank == 0 :\n if not os.path.exists(args.checkpoint_path):\n os.makedirs(args.checkpoint_path)\n print(\"make dir: \",args.checkpoint_path)\n train_net(args)\n\n\n \n","repo_name":"LeslieZhoa/Simple-Lora","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"21"}
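Editor's note: the train.py record above derives per-process determinism from setup_seed(args.seed + args.rank), but the helper itself lives in utils/utils.py, which is outside this excerpt. A common shape for such a helper (illustrative sketch only; the real implementation may differ):

import random
import numpy as np
import torch

def setup_seed(seed):
    # seed every RNG the training loop may touch
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade some speed for reproducible cuDNN kernels
    torch.backends.cudnn.deterministic = True

Offsetting the seed by the rank, as the caller does, keeps data shuffling decorrelated across workers while the run as a whole stays reproducible.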
%d\"%(int(os.environ.get('LOCAL_RANK','0')),args.rank,args.world_size))\n if args.rank == 0 :\n if not os.path.exists(args.checkpoint_path):\n os.makedirs(args.checkpoint_path)\n print(\"make dir: \",args.checkpoint_path)\n train_net(args)\n\n\n \n","repo_name":"LeslieZhoa/Simple-Lora","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"21"} +{"seq_id":"39216894387","text":"from flask import Blueprint,jsonify,request\nfrom .testmonial_model import Testmonial\nfrom faker import Faker\nimport random\nimport avinit\n\ntestmonial = Blueprint(\"testmonial\",__name__)\nprofile_image= \"https://xsgames.co/randomusers/\"\ntestmonial_dir = {}\ntestmonial_list = []\nfake = Faker()\n@testmonial.route(\"/\")\ndef get_testmonials():\n limit = request.args.get('limit')\n limit = int(limit)\n if limit > 25:\n return jsonify({\"message\":\"You can't exeed the limit\"})\n for i in range(limit):\n testmonial = Testmonial(id,fake.name(),profile_image,fake.job(),fake.text())\n \n testmonial_dir = {\"id\":testmonial.id,\"name\":testmonial.name,\"profile_pic\":testmonial.profile_pic,\"job\":testmonial.job,\"text\":testmonial.text}\n \n testmonial_list.append(testmonial_dir)\n return jsonify(testmonial_list)\n \n\n@testmonial.route(\"/\")\ndef get_testmonial(id):\n \n return jsonify(testmonial_list[id])","repo_name":"KwikirizaDan/random-users-api","sub_path":"testmonial/testmonial_view.py","file_name":"testmonial_view.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39983783797","text":"# Import modules\nimport pygame, sys, random, os\nfrom pygame.locals import *\nfrom functions import *\n\n# Initialise pygame\npygame.init()\nclock = pygame.time.Clock()\n\n# Caption\npygame.display.set_caption(\"Stack Game - By LyleW473\")\n\n\n# Game variables\ntime_counter = 30000 # 30 seconds in milliseconds\ntime_counter_2 = 8000 # The time counter for game 2\nuser_text = \"\" # Holds the numbers that the user types into the input box \nuser_input_rectangle = pygame.Rect((screen_width / 2) - 100, screen_height - 90, 200, 50) # User input box rectangle\nplayer_score = 0 # The score the player currently has\nstarting_setup = True\nanswered_correctly = 0 # 1 = Correct, -1 = Incorrect\n\n# Check if a text file called \"high_score_1\" exists\nif os.path.exists('high_score_1.txt'):\n # Read the contents of the file:\n with open('high_score_1.txt', 'r') as high_score_file:\n # Set the high score to be the value inside that file\n high_score = int(high_score_file.read())\n# If it doesn't exist\nelse:\n # Set the high score as 0\n high_score = 0\n\n\n# Check if a text file called \"high_score_2\" exists\nif os.path.exists('high_score_2.txt'):\n # Read the contents of the file:\n with open('high_score_2.txt', 'r') as high_score_2_file:\n # Set the high score to be the value inside that file\n high_score_2 = int(high_score_2_file.read())\n# If it doesn't exist\nelse:\n # Set the high score as 0\n high_score_2 = 0\n\n\n# Placeholder values for these items \nstack = 0\ncurrent_question = 0\ncurrent_question_answer = 0\nquestion_answered_time = 0\nthreshold_height = 0\nthreshold_height_tuples = ( (1, 592), (2, 512), (3, 432), (4, 352), (5, 272) )\nlast_threshold_height = 0\npermanent_time_decrement = 0\n\n# Main loop\nrun = True\nwhile run:\n \n # Limit the FPS to 60\n clock.tick(60)\n\n # Menu browsing and 
updating\n if menu.in_game == False:\n # Find the position of the mouse\n pos = pygame.mouse.get_pos()\n # Update the menu, feeding the clicked variable and mouse position into the function\n menu.update(pos) # Set the clicked variable as the returned value from the menu\n\n # Only if we are in the paused menu, should we draw a \"faded\" timer\n if menu.show_paused_menu == True:\n # If we paused the game from the \"goal element\" game\n if menu.game_v1 == True:\n draw_alpha_text(str(round(time_counter / 1000, 2)), time_font, BLACK, 390, 0) \n # If we paused the game from the \"goal height\" game\n elif menu.game_v2 == True:\n draw_alpha_text(str(round(time_counter_2 / 1000, 2)), time_font, BLACK, 390, 0) \n\n # Check if the player has requested to restart the game\n if menu.reset_game == True:\n # Check which game called the reset function\n if menu.game_v1 == True:\n # Reset all of the game variables\n time_counter, player_score, stack, user_text, starting_setup = reset_game(time_counter, player_score, stack, user_text, starting_setup)\n elif menu.game_v2 == True:\n # Reset all of the game variables\n time_counter_2, player_score, stack, user_text, starting_setup, permanent_time_decrement = reset_game(time_counter_2, player_score, stack, user_text, starting_setup, permanent_time_decrement)\n\n # Reset the current modes (In case the player wants to try a different mode)\n menu.maths_mode = False\n menu.spelling_mode = False\n menu.game_v1 = False\n menu.game_v2 = False\n \n # Now that the game has been reset, set this variable back to False\n menu.reset_game = False \n\n # Game 1 (Reach the goal element)\n if menu.game_v1 == True:\n time_counter, user_text, player_score, starting_setup, answered_correctly, high_score, stack, current_question, current_question_answer, question_answered_time = game_v1(time_counter, user_text, user_input_rectangle, player_score, starting_setup, answered_correctly, high_score, stack, current_question, current_question_answer, question_answered_time)\n \n # Game 2 (Reach the goal height by pushing and popping elements)\n if menu.game_v2 == True:\n time_counter_2, user_text, player_score, starting_setup, answered_correctly, high_score_2, stack, current_question, current_question_answer, question_answered_time, threshold_height, threshold_height_tuples, last_threshold_height, permanent_time_decrement = game_v2(time_counter_2, user_text, user_input_rectangle, player_score, starting_setup, answered_correctly, high_score_2, stack, current_question, current_question_answer, question_answered_time, threshold_height, threshold_height_tuples, last_threshold_height, permanent_time_decrement)\n \n # Event handler\n for event in pygame.event.get():\n if event.type == QUIT:\n run = False\n pygame.quit()\n sys.exit()\n\n # Check if the mouse button has been pressed\n if event.type == MOUSEBUTTONDOWN:\n # Check if the mouse button clicked was the left click\n if event.button == 1: # (1 = left, 2 = middle, 3 = right, 4 = scroll up, 5 = scrolldown)\n menu.clicked = True\n\n\n pygame.display.update()\n","repo_name":"LyleW473/Data-structures","sub_path":"DataStructures/Stack/Stack game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9478220863","text":"from eventhandler import EventHandler\n\ndef manage_pr_state(api, payload):\n labels = api.get_labels();\n\n for label in [\"S-awaiting-merge\", \"S-tests-failed\", \"S-needs-code-changes\"]:\n if 
label in labels:\n api.remove_label(label)\n if not \"S-awaiting-review\" in labels:\n api.add_label(\"S-awaiting-review\")\n\n # If mergeable is null, the data wasn't available yet. It would be nice to try to fetch that\n # information again.\n if payload[\"action\"] == \"synchronize\" and payload['pull_request']['mergeable']:\n if \"S-needs-rebase\" in labels:\n api.remove_label(\"S-needs-rebase\")\n\nclass StatusUpdateHandler(EventHandler):\n def on_pr_opened(self, api, payload):\n manage_pr_state(api, payload)\n\n def on_pr_updated(self, api, payload):\n manage_pr_state(api, payload)\n\n\nhandler_interface = StatusUpdateHandler\n","repo_name":"magni-/highfive","sub_path":"handlers/status_update/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"14889325244","text":"from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, JsonResponse\nfrom rest_framework.parsers import JSONParser\n\nfrom .models import Student\nfrom .serializers import StudentSerializer\n\n\n@csrf_exempt\ndef student_list(request):\n \n ### ExerciseXP func###\n # if request.method == 'GET':\n # students = Student.objects.all() \n # serializer = StudentSerializer(students, many=True) \n # return JsonResponse(serializer.data, safe=False)\n \n ### DailyChallenge func###\n if request.method == 'GET':\n date_joined_param = request.GET.get('date_joined')\n if date_joined_param:\n students = Student.objects.filter(date_joined=date_joined_param)\n else:\n students = Student.objects.all() \n serializer = StudentSerializer(students, many=True) \n return JsonResponse(serializer.data, safe=False)\n \n if request.method == 'POST':\n serializer = StudentSerializer(data=request.POST)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n else:\n return JsonResponse({'error':'Invalid json data'}, status=400) \n \n \n@csrf_exempt\ndef student_detail(request, student_pk):\n try:\n student = Student.objects.get(id=student_pk)\n except Student.DoesNotExist:\n return HttpResponse(status=404) \n \n if request.method == 'GET':\n serializer = StudentSerializer(student) \n return JsonResponse(serializer.data)\n \n if request.method == 'PUT':\n new_student = JSONParser().parse(request)\n serializer = StudentSerializer(instance=student, data=new_student)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data)\n return JsonResponse(serializer.errors, status=400)\n\n if request.method == 'DELETE':\n student.delete()\n return HttpResponse(status=204) # Successfully deleted","repo_name":"alexzborovskii/DI-Bootcamp","sub_path":"Week6/Day2/ExercisesXP/students_project/students/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74991259251","text":"import re\nimport shlex\nfrom typing import Dict, Tuple, Callable\n\nfrom senf import bytes2fsn, fsn2bytes\n\nfrom quodlibet import const\nfrom quodlibet.util import print_d, print_w\nfrom .tcpserver import BaseTCPServer, BaseTCPConnection\n\n\nclass AckError:\n NOT_LIST = 1\n ARG = 2\n PASSWORD = 3\n PERMISSION = 4\n UNKNOWN = 5\n NO_EXIST = 50\n PLAYLIST_MAX = 51\n SYSTEM = 52\n PLAYLIST_LOAD = 53\n UPDATE_ALREADY = 54\n PLAYER_SYNC = 55\n EXIST = 56\n\n\nclass Permissions:\n PERMISSION_NONE = 0\n PERMISSION_READ = 1\n PERMISSION_ADD = 2\n 
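# each permission level occupies its own bit, so levels can be combined with bitwise OR (see PERMISSION_ALL below)\n 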
PERMISSION_CONTROL = 4\n PERMISSION_ADMIN = 8\n PERMISSION_ALL = PERMISSION_NONE | \\\n PERMISSION_READ | \\\n PERMISSION_ADD | \\\n PERMISSION_CONTROL | \\\n PERMISSION_ADMIN\n\n\nTAG_MAPPING = [\n (u\"Artist\", \"artist\"),\n (u\"ArtistSort\", \"artistsort\"),\n (u\"Album\", \"album\"),\n (u\"AlbumArtist\", \"albumartist\"),\n (u\"AlbumArtistSort\", \"albumartistsort\"),\n (u\"Title\", \"title\"),\n (u\"Track\", \"tracknumber\"),\n (u\"Genre\", \"genre\"),\n (u\"Date\", \"~year\"),\n (u\"Composer\", \"composer\"),\n (u\"Performer\", \"performer\"),\n (u\"Comment\", \"comment\"),\n (u\"Disc\", \"discnumber\"),\n (u\"Name\", \"~basename\"),\n (u\"MUSICBRAINZ_ARTISTID\", \"musicbrainz_artistid\"),\n (u\"MUSICBRAINZ_ALBUMID\", \"musicbrainz_albumid\"),\n (u\"MUSICBRAINZ_ALBUMARTISTID\", \"musicbrainz_albumartistid\"),\n (u\"MUSICBRAINZ_TRACKID\", \"musicbrainz_trackid\"),\n]\n\n\ndef format_tags(song):\n \"\"\"Gives a tag list message for a song\"\"\"\n\n lines = []\n for mpd_key, ql_key in TAG_MAPPING:\n value = song.comma(ql_key) or None\n\n if value is not None:\n lines.append(u\"%s: %s\" % (mpd_key, value))\n\n return u\"\\n\".join(lines)\n\n\nclass ParseError(Exception):\n pass\n\n\ndef parse_command(line):\n \"\"\"Parses an MPD command (without trailing newline)\n\n Returns (command, [arguments]) or raises ParseError in case of an error.\n \"\"\"\n\n assert isinstance(line, bytes)\n\n parts = re.split(b\"[ \\\\t]+\", line, maxsplit=1)\n if not parts:\n raise ParseError(\"empty command\")\n command = parts[0]\n\n if len(parts) > 1:\n lex = shlex.shlex(bytes2fsn(parts[1], \"utf-8\"), posix=True)\n lex.whitespace_split = True\n lex.commenters = \"\"\n lex.quotes = '\"'\n lex.whitespace = \" \\t\"\n args = [fsn2bytes(a, \"utf-8\") for a in lex]\n else:\n args = []\n\n try:\n command = command.decode(\"utf-8\")\n except ValueError as e:\n raise ParseError(e) from e\n\n dec_args = []\n for arg in args:\n try:\n arg = arg.decode(\"utf-8\")\n except ValueError as e:\n raise ParseError(e) from e\n dec_args.append(arg)\n\n return command, dec_args\n\n\nclass MPDService:\n \"\"\"This is the actual shared MPD service which the clients talk to\"\"\"\n\n version = (0, 17, 0)\n\n def __init__(self, app, config):\n self._app = app\n self._connections = set()\n self._idle_subscriptions = {}\n self._idle_queue = {}\n self._pl_ver = 0\n\n self._config = config\n self._options = app.player_options\n\n if not self._config.config_get(\"password\"):\n self.default_permission = Permissions.PERMISSION_ALL\n else:\n self.default_permission = Permissions.PERMISSION_NONE\n\n def options_changed(*args):\n self.emit_changed(\"options\")\n\n self._options.connect(\"notify::shuffle\", options_changed)\n self._options.connect(\"notify::repeat\", options_changed)\n self._options.connect(\"notify::single\", options_changed)\n\n self._player_sigs = []\n\n def volume_changed(*args):\n self.emit_changed(\"mixer\")\n\n id_ = app.player.connect(\"notify::volume\", volume_changed)\n self._player_sigs.append(id_)\n\n def player_changed(*args):\n self.emit_changed(\"player\")\n\n id_ = app.player.connect(\"paused\", player_changed)\n self._player_sigs.append(id_)\n id_ = app.player.connect(\"unpaused\", player_changed)\n self._player_sigs.append(id_)\n id_ = app.player.connect(\"seek\", player_changed)\n self._player_sigs.append(id_)\n\n def playlist_changed(*args):\n self._pl_ver += 1\n self.emit_changed(\"playlist\")\n\n id_ = app.player.connect(\"song-started\", playlist_changed)\n self._player_sigs.append(id_)\n\n def 
_get_id(self, info):\n # XXX: we need a unique 31 bit ID, but don't have one.\n # Given that the heap is continuous and each object is >16 bytes\n # this should work\n return (id(info) & 0xFFFFFFFF) >> 1\n\n def destroy(self):\n for id_ in self._player_sigs:\n self._app.player.disconnect(id_)\n del self._options\n del self._app\n\n def add_connection(self, connection):\n self._connections.add(connection)\n self._idle_queue[connection] = set()\n\n def remove_connection(self, connection):\n self._idle_subscriptions.pop(connection, None)\n self._idle_queue.pop(connection, None)\n self._connections.remove(connection)\n\n def register_idle(self, connection, subsystems):\n self._idle_subscriptions[connection] = set(subsystems)\n self.flush_idle()\n\n def flush_idle(self):\n flushed = []\n for conn, subs in self._idle_subscriptions.items():\n # figure out which subsystems to report for each connection\n queued = self._idle_queue[conn]\n if subs:\n to_send = subs & queued\n else:\n to_send = queued\n queued -= to_send\n\n # send out the response and remove the idle status for affected\n # connections\n for subsystem in to_send:\n conn.write_line(u\"changed: %s\" % subsystem)\n if to_send:\n flushed.append(conn)\n conn.ok()\n conn.start_write()\n\n for conn in flushed:\n self._idle_subscriptions.pop(conn, None)\n\n def unregister_idle(self, connection):\n self._idle_subscriptions.pop(connection, None)\n\n def emit_changed(self, subsystem):\n for _conn, subs in self._idle_queue.items():\n subs.add(subsystem)\n self.flush_idle()\n\n def play(self):\n self._app.player.playpause()\n\n def playid(self, songid):\n self.play()\n\n def pause(self, value=None):\n if value is None:\n self._app.player.paused = not self._app.player.paused\n else:\n self._app.player.paused = value\n\n def stop(self):\n self._app.player.stop()\n\n def next(self):\n self._app.player.next()\n\n def previous(self):\n self._app.player.previous()\n\n def seek(self, songpos, time_):\n \"\"\"time_ in seconds\"\"\"\n\n self._app.player.seek(time_ * 1000)\n\n def seekid(self, songid, time_):\n \"\"\"time_ in seconds\"\"\"\n\n self._app.player.seek(time_ * 1000)\n\n def seekcur(self, value, relative):\n if relative:\n pos = self._app.player.get_position()\n self._app.player.seek(pos + value * 1000)\n else:\n self._app.player.seek(value * 1000)\n\n def setvol(self, value):\n \"\"\"value: 0..100\"\"\"\n\n self._app.player.volume = value / 100.0\n\n def repeat(self, value):\n self._options.repeat = value\n\n def random(self, value):\n self._options.shuffle = value\n\n def single(self, value):\n self._options.single = value\n\n def stats(self):\n has_song = int(bool(self._app.player.info))\n stats = [\n (\"artists\", has_song),\n (\"albums\", has_song),\n (\"songs\", has_song),\n (\"uptime\", 1),\n (\"playtime\", 1),\n (\"db_playtime\", 1),\n (\"db_update\", 1252868674),\n ]\n\n return stats\n\n def status(self):\n app = self._app\n info = app.player.info\n\n if info:\n if app.player.paused:\n state = \"pause\"\n else:\n state = \"play\"\n else:\n state = \"stop\"\n\n status = [\n (\"volume\", int(app.player.volume * 100)),\n (\"repeat\", int(self._options.repeat)),\n (\"random\", int(self._options.shuffle)),\n (\"single\", int(self._options.single)),\n (\"consume\", 0),\n (\"playlist\", self._pl_ver),\n (\"playlistlength\", int(bool(app.player.info))),\n (\"mixrampdb\", 0.0),\n (\"state\", state),\n ]\n\n if info:\n status.append((\"audio\", \"%d:%d:%d\" % (\n info(\"~#samplerate\") or 0,\n info(\"~#bitdepth\") or 0,\n info(\"~#channels\") 
or 0)))\n total_time = int(info(\"~#length\"))\n elapsed_time = int(app.player.get_position() / 1000)\n elapsed_exact = \"%1.3f\" % (app.player.get_position() / 1000.0)\n status.extend([\n (\"song\", 0),\n (\"songid\", self._get_id(info)),\n ])\n\n if state != \"stop\":\n status.extend([\n (\"time\", \"%d:%d\" % (elapsed_time, total_time)),\n (\"elapsed\", elapsed_exact),\n (\"bitrate\", info(\"~#bitrate\")),\n ])\n\n return status\n\n def currentsong(self):\n info = self._app.player.info\n if info is None:\n return None\n\n parts = []\n parts.append(u\"file: %s\" % info(\"~filename\"))\n parts.append(format_tags(info))\n parts.append(u\"Time: %d\" % int(info(\"~#length\")))\n parts.append(u\"Pos: %d\" % 0)\n parts.append(u\"Id: %d\" % self._get_id(info))\n\n return u\"\\n\".join(parts)\n\n def playlistinfo(self, start=None, end=None):\n if start is not None and start > 1:\n return None\n\n return self.currentsong()\n\n def playlistid(self, songid=None):\n return self.currentsong()\n\n def plchanges(self, version):\n if version != self._pl_ver:\n return self.currentsong()\n\n def plchangesposid(self, version):\n info = self._app.player.info\n if version != self._pl_ver and info:\n parts = []\n parts.append(u\"file: %s\" % info(\"~filename\"))\n parts.append(u\"Pos: %d\" % 0)\n parts.append(u\"Id: %d\" % self._get_id(info))\n return u\"\\n\".join(parts)\n\n\nclass MPDServer(BaseTCPServer):\n\n def __init__(self, app, config, port):\n self._app = app\n self._config = config\n super().__init__(port, MPDConnection, const.DEBUG)\n\n def handle_init(self):\n print_d(\"Creating the MPD service\")\n self.service = MPDService(self._app, self._config)\n\n def handle_idle(self):\n print_d(\"Destroying the MPD service\")\n self.service.destroy()\n del self.service\n\n def log(self, msg):\n print_d(msg)\n\n\nclass MPDRequestError(Exception):\n\n def __init__(self, msg, code=AckError.UNKNOWN, index=None):\n self.msg = msg\n self.code = code\n self.index = index\n\n\nclass MPDConnection(BaseTCPConnection):\n\n # ------------ connection interface ------------\n\n def handle_init(self, server):\n service = server.service\n self.service = service\n service.add_connection(self)\n\n str_version = u\".\".join(map(str, service.version))\n self._buf = bytearray((u\"OK MPD %s\\n\" % str_version).encode(\"utf-8\"))\n self._read_buf = bytearray()\n\n # begin - command processing state\n self._use_command_list = False\n # everything below is only valid if _use_command_list is True\n self._command_list_ok = False\n self._command_list = []\n self._command = None\n # end - command processing state\n\n self.permission = self.service.default_permission\n\n self.start_write()\n self.start_read()\n\n def handle_read(self, data):\n self._feed_data(data)\n\n while 1:\n line = self._get_next_line()\n if line is None:\n break\n\n self.log(u\"-> \" + repr(line))\n\n try:\n cmd, args = parse_command(line)\n except ParseError:\n # TODO: not sure what to do here re command lists\n continue\n\n try:\n self._handle_command(cmd, args)\n except MPDRequestError as e:\n self._error(e.msg, e.code, e.index)\n self._use_command_list = False\n del self._command_list[:]\n\n def handle_write(self):\n data = self._buf[:]\n del self._buf[:]\n return data\n\n def can_write(self):\n return bool(self._buf)\n\n def handle_close(self):\n self.log(\"connection closed\")\n self.service.remove_connection(self)\n del self.service\n\n # ------------ rest ------------\n\n def authenticate(self, password):\n if password == 
self.service._config.config_get(\"password\"):\n self.permission = Permissions.PERMISSION_ALL\n else:\n self.permission = self.service.default_permission\n raise MPDRequestError(\"Password incorrect\", AckError.PASSWORD)\n\n def log(self, msg):\n if const.DEBUG:\n print_d(\"[%s] %s\" % (self.name, msg))\n\n def _feed_data(self, new_data):\n \"\"\"Feed new data into the read buffer\"\"\"\n\n self._read_buf.extend(new_data)\n\n def _get_next_line(self):\n \"\"\"Returns the next line from the read buffer or None\"\"\"\n\n try:\n index = self._read_buf.index(b\"\\n\")\n except ValueError:\n return None\n\n line = bytes(self._read_buf[:index])\n del self._read_buf[:index + 1]\n return line\n\n def write_line(self, line):\n \"\"\"Writes a line to the client\"\"\"\n\n assert isinstance(line, str)\n self.log(u\"<- \" + repr(line))\n\n self._buf.extend(line.encode(\"utf-8\", errors=\"replace\") + b\"\\n\")\n\n def ok(self):\n self.write_line(u\"OK\")\n\n def _error(self, msg, code, index):\n error = []\n error.append(u\"ACK [%d\" % code)\n if index is not None:\n error.append(u\"@%d\" % index)\n assert self._command is not None\n error.append(u\"] {%s}\" % self._command)\n if msg is not None:\n error.append(u\" %s\" % msg)\n self.write_line(u\"\".join(error))\n\n def _handle_command(self, command, args):\n self._command = command\n\n if command == u\"command_list_end\":\n if not self._use_command_list:\n self._error(u\"list_end without begin\", 0, 0)\n return\n\n for i, (cmd, args) in enumerate(self._command_list):\n try:\n self._exec_command(cmd, args)\n except MPDRequestError as e:\n # reraise with index\n raise MPDRequestError(e.msg, e.code, i) from e\n\n self.ok()\n self._use_command_list = False\n del self._command_list[:]\n return\n\n if command in (u\"command_list_begin\", u\"command_list_ok_begin\"):\n if self._use_command_list:\n raise MPDRequestError(u\"begin without end\")\n\n self._use_command_list = True\n self._command_list_ok = command == u\"command_list_ok_begin\"\n assert not self._command_list\n return\n\n if self._use_command_list:\n self._command_list.append((command, args))\n else:\n self._exec_command(command, args)\n\n def _exec_command(self, command, args, no_ack=False):\n self._command = command\n\n if command not in self._commands:\n print_w(\"Unhandled command %r, sending OK.\" % command)\n command = \"ping\"\n\n # Unhandled command, default to OK for now..\n if not self._use_command_list:\n self.ok()\n elif self._command_list_ok:\n self.write_line(u\"list_OK\")\n return\n\n cmd, do_ack, permission = self._commands[command]\n if permission != (self.permission & permission):\n raise MPDRequestError(\"Insufficient permission\",\n AckError.PERMISSION)\n\n cmd(self, self.service, args)\n\n if self._use_command_list:\n if self._command_list_ok:\n self.write_line(u\"list_OK\")\n elif do_ack:\n self.ok()\n\n _commands: Dict[str, Tuple[Callable, bool, int]] = {}\n\n @classmethod\n def Command(cls, name, ack=True, permission=Permissions.PERMISSION_ADMIN):\n\n def wrap(func):\n assert name not in cls._commands, name\n cls._commands[name] = (func, ack, permission)\n return func\n\n return wrap\n\n @classmethod\n def list_commands(cls):\n \"\"\"A list of supported commands\"\"\"\n\n return cls._commands.keys()\n\n\ndef _verify_length(args, length):\n if not len(args) >= length:\n raise MPDRequestError(\"Wrong arg count\")\n\n\ndef _parse_int(arg):\n try:\n return int(arg)\n except ValueError as e:\n raise MPDRequestError(\"invalid arg\") from e\n\n\ndef _parse_bool(arg):\n try:\n 
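# MPD transmits booleans as the strings \"0\" and \"1\"; anything else must raise a protocol error\n 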
value = int(arg)\n if value not in (0, 1):\n raise ValueError\n except ValueError as e:\n raise MPDRequestError(\"invalid arg\") from e\n else:\n return bool(value)\n\n\ndef _parse_range(arg):\n try:\n values = [int(v) for v in arg.split(\":\")]\n except ValueError as e:\n raise MPDRequestError(\"arg in range not a number\") from e\n\n if len(values) == 1:\n return (values[0], values[0] + 1)\n elif len(values) == 2:\n return values\n else:\n raise MPDRequestError(\"invalid range\")\n\n\n@MPDConnection.Command(\"idle\", ack=False)\ndef _cmd_idle(conn, service, args):\n service.register_idle(conn, args)\n\n\n@MPDConnection.Command(\"ping\", permission=Permissions.PERMISSION_NONE)\ndef _cmd_ping(conn, service, args):\n return\n\n\n@MPDConnection.Command(\"password\", permission=Permissions.PERMISSION_NONE)\ndef _cmd_password(conn, service, args):\n _verify_length(args, 1)\n conn.authenticate(args[0])\n\n\n@MPDConnection.Command(\"noidle\")\ndef _cmd_noidle(conn, service, args):\n service.unregister_idle(conn)\n\n\n@MPDConnection.Command(\"close\", ack=False,\n permission=Permissions.PERMISSION_NONE)\ndef _cmd_close(conn, service, args):\n conn.close()\n\n\n@MPDConnection.Command(\"play\")\ndef _cmd_play(conn, service, args):\n service.play()\n\n\n@MPDConnection.Command(\"listplaylists\")\ndef _cmd_listplaylists(conn, service, args):\n pass\n\n\n@MPDConnection.Command(\"list\")\ndef _cmd_list(conn, service, args):\n pass\n\n\n@MPDConnection.Command(\"playid\")\ndef _cmd_playid(conn, service, args):\n _verify_length(args, 1)\n songid = _parse_int(args[0])\n service.playid(songid)\n\n\n@MPDConnection.Command(\"pause\")\ndef _cmd_pause(conn, service, args):\n value = None\n if args:\n _verify_length(args, 1)\n value = _parse_bool(args[0])\n service.pause(value)\n\n\n@MPDConnection.Command(\"stop\")\ndef _cmd_stop(conn, service, args):\n service.stop()\n\n\n@MPDConnection.Command(\"next\")\ndef _cmd_next(conn, service, args):\n service.next()\n\n\n@MPDConnection.Command(\"previous\")\ndef _cmd_previous(conn, service, args):\n service.previous()\n\n\n@MPDConnection.Command(\"repeat\")\ndef _cmd_repeat(conn, service, args):\n _verify_length(args, 1)\n value = _parse_bool(args[0])\n service.repeat(value)\n\n\n@MPDConnection.Command(\"random\")\ndef _cmd_random(conn, service, args):\n _verify_length(args, 1)\n value = _parse_bool(args[0])\n service.random(value)\n\n\n@MPDConnection.Command(\"single\")\ndef _cmd_single(conn, service, args):\n _verify_length(args, 1)\n value = _parse_bool(args[0])\n service.single(value)\n\n\n@MPDConnection.Command(\"setvol\")\ndef _cmd_setvol(conn, service, args):\n _verify_length(args, 1)\n value = _parse_int(args[0])\n service.setvol(value)\n\n\n@MPDConnection.Command(\"status\")\ndef _cmd_status(conn, service, args):\n status = service.status()\n for k, v in status:\n conn.write_line(u\"%s: %s\" % (k, v))\n\n\n@MPDConnection.Command(\"stats\")\ndef _cmd_stats(conn, service, args):\n status = service.stats()\n for k, v in status:\n conn.write_line(u\"%s: %s\" % (k, v))\n\n\n@MPDConnection.Command(\"currentsong\")\ndef _cmd_currentsong(conn, service, args):\n stats = service.currentsong()\n if stats is not None:\n conn.write_line(stats)\n\n\n@MPDConnection.Command(\"count\")\ndef _cmd_count(conn, service, args):\n conn.write_line(u\"songs: 0\")\n conn.write_line(u\"playtime: 0\")\n\n\n@MPDConnection.Command(\"plchanges\")\ndef _cmd_plchanges(conn, service, args):\n _verify_length(args, 1)\n version = _parse_int(args[0])\n changes = service.plchanges(version)\n if 
changes is not None:\n conn.write_line(changes)\n\n\n@MPDConnection.Command(\"plchangesposid\")\ndef _cmd_plchangesposid(conn, service, args):\n _verify_length(args, 1)\n version = _parse_int(args[0])\n changes = service.plchangesposid(version)\n if changes is not None:\n conn.write_line(changes)\n\n\n@MPDConnection.Command(\"listallinfo\")\ndef _cmd_listallinfo(*args):\n _cmd_currentsong(*args)\n\n\n@MPDConnection.Command(\"seek\")\ndef _cmd_seek(conn, service, args):\n _verify_length(args, 2)\n songpos = _parse_int(args[0])\n time_ = _parse_int(args[1])\n service.seek(songpos, time_)\n\n\n@MPDConnection.Command(\"seekid\")\ndef _cmd_seekid(conn, service, args):\n _verify_length(args, 2)\n songid = _parse_int(args[0])\n time_ = _parse_int(args[1])\n service.seekid(songid, time_)\n\n\n@MPDConnection.Command(\"seekcur\")\ndef _cmd_seekcur(conn, service, args):\n _verify_length(args, 1)\n\n relative = False\n time_ = args[0]\n if time_.startswith((\"+\", \"-\")):\n relative = True\n\n try:\n time_ = int(time_)\n except ValueError as e:\n raise MPDRequestError(\"arg not a number\") from e\n\n service.seekcur(time_, relative)\n\n\n@MPDConnection.Command(\"outputs\")\ndef _cmd_outputs(conn, service, args):\n conn.write_line(u\"outputid: 0\")\n conn.write_line(u\"outputname: dummy\")\n conn.write_line(u\"outputenabled: 1\")\n\n\n@MPDConnection.Command(\"commands\", permission=Permissions.PERMISSION_NONE)\ndef _cmd_commands(conn, service, args):\n for name in conn.list_commands():\n conn.write_line(u\"command: \" + str(name))\n\n\n@MPDConnection.Command(\"tagtypes\")\ndef _cmd_tagtypes(conn, service, args):\n for mpd_key, _ql_key in TAG_MAPPING:\n conn.write_line(mpd_key)\n\n\n@MPDConnection.Command(\"lsinfo\")\ndef _cmd_lsinfo(conn, service, args):\n _verify_length(args, 1)\n\n\n@MPDConnection.Command(\"playlistinfo\")\ndef _cmd_playlistinfo(conn, service, args):\n if args:\n _verify_length(args, 1)\n start, end = _parse_range(args[0])\n result = service.playlistinfo(start, end)\n else:\n result = service.playlistinfo()\n if result is not None:\n conn.write_line(result)\n\n\n@MPDConnection.Command(\"playlistid\")\ndef _cmd_playlistid(conn, service, args):\n if args:\n songid = _parse_int(args[0])\n else:\n songid = None\n result = service.playlistid(songid)\n if result is not None:\n conn.write_line(result)\n","repo_name":"quodlibet/quodlibet","sub_path":"quodlibet/ext/events/mpdserver/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23212,"program_lang":"python","lang":"en","doc_type":"code","stars":1306,"dataset":"github-code","pt":"21"}
{"seq_id":"872122517","text":"\n\n# create the class\n\nclass Funcionarios:\n def __init__(self, nome, sobrenome, data_nascimento):\n self.nome = nome\n self.sobrenome = sobrenome\n self.data_nascimento = data_nascimento\n \n\n# create the objects\nusuario1 = Funcionarios('Elena', 'Cabral', '12/01/2009')\nusuario2 = Funcionarios('Carol', 'Silva', '15/10/2005')\nusuario3 = Funcionarios('Emerson', 'Pedro', '07/09/2002')\n\n# print\nprint(usuario1.nome)\nprint(usuario2.nome)\nprint(usuario3.nome)\n","repo_name":"EmersonPedroMenezes/Estudos-De-Python","sub_path":"P.O.O(PROGAMAÇÃO_ORIENTADA_A_OBJETOS/Construtores.py","file_name":"Construtores.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
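Editor's note: the producer.py record that follows publishes to a topic exchange named topicrouting with routing keys user.pk.payments and user.pk.analytics. The matching consumer is not part of this excerpt; a minimal sketch under the same exchange name (the queue naming and ack mode are my assumptions):

import pika
from pika.exchange_type import ExchangeType

connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
channel.exchange_declare(exchange="topicrouting", exchange_type=ExchangeType.topic)

# exclusive, auto-named queue bound to payment messages only
queue = channel.queue_declare(queue="", exclusive=True).method.queue
channel.queue_bind(exchange="topicrouting", queue=queue, routing_key="user.*.payments")

def on_message(ch, method, properties, body):
    print(f"Received: {body}")

channel.basic_consume(queue=queue, on_message_callback=on_message, auto_ack=True)
channel.start_consuming()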
{"seq_id":"37651591458","text":"import pika\nfrom pika.exchange_type import ExchangeType\nconnection_parameters = pika.ConnectionParameters(\"localhost\")\n\nconnection = pika.BlockingConnection(connection_parameters)\n\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange=\"topicrouting\", exchange_type=ExchangeType.topic)\n\n\nmessage = \"Hello, I am sending payment message\"\nmessage1 = \"Hello, I am sending analytics message\"\n\nchannel.basic_publish(exchange=\"topicrouting\",routing_key=\"user.pk.payments\",body=message )\nchannel.basic_publish(exchange=\"topicrouting\",routing_key=\"user.pk.analytics\",body=message1 )\n\n\nprint(f\"Sent Message : {message}\")\n\nprint(f\"Sent Message : {message1}\")\n\nconnection.close()","repo_name":"muddasar-de/RabbitMQ","sub_path":"Routing-Topic Exchange/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"629527561","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 27 22:15:56 2017\n\n@author: Naruto_kathi\n\"\"\"\n\nfrom nltk.corpus import wordnet\n\nsynsets = wordnet.synsets(\"good\")\nsynonyms = []\nantonyms = []\n\nfor syn in synsets:\n for lemma in syn.lemmas():\n synonyms.append(lemma.name())\n if lemma.antonyms():\n antonyms.append(lemma.antonyms()[0].name())\n \n\n\n\nprint(set(synonyms))\nprint(set(antonyms))\n\n\n\nword1 = wordnet.synset(\"ship.n.01\")\nword2 = wordnet.synset(\"submarine.n.01\")\n\n#semantic similarity of words\nprint(word2.wup_similarity(word1))","repo_name":"saisravankathi/nltk","sub_path":"synsets.py","file_name":"synsets.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74303724212","text":"import argparse\nfrom os.path import abspath\nimport sys\n\nfrom sentencepiece import SentencePieceProcessor\n\n\ndef main(lines):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", required=True, type=abspath, help=\"Path to model file\")\n args = parser.parse_args()\n\n sp = SentencePieceProcessor()\n sp.Load(args.model)\n\n for line in lines:\n print(sp.DecodePieces(line.strip().split(\" \")))\n\n\n\nif __name__ == \"__main__\":\n main(sys.stdin)\n","repo_name":"cl-tohoku/aobav2","sub_path":"aoba/spm/detokenize_spm.py","file_name":"detokenize_spm.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"31289294914","text":"from typing import List\n\n\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\n\nclass Solution:\n def preorder(self, root: Node) -> List[int]:\n def dfs(node):\n if not node:\n return\n ans.append(node.val)\n for c in node.children:\n dfs(c)\n ans = []\n dfs(root)\n return ans\n\n\nclass Solution2:\n def preorder(self, root: Node) -> List[int]:\n if not root:\n return []\n ans = []\n s = [root]\n while s:\n node = s.pop()\n ans.append(node.val)\n for c in node.children[::-1]:\n s.append(c)\n return ans\n\n\nsolution = Solution2()\nroot = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])\nassert [1, 3, 5, 6, 2, 4] == solution.preorder(root)\n","repo_name":"sengami-yuka/py","sub_path":"coding_problems/leetcode/589_n_ary_tree_preorder_traversal.py","file_name":"589_n_ary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4849629151","text":"#!/usr/bin/env python\n# -*- test-case-name: 
twistedpusher.test.test_event -*-\n# -*- test-case-name: twistedpusher.test.test_eventemitter -*-\n\nimport logging\nimport warnings\nimport traceback\nfrom collections import defaultdict\nfrom itertools import chain\nimport json\nfrom zope.interface import implementer\n\nfrom twistedpusher.interfaces import IEventEmitter\nfrom twistedpusher.errors import BadEventNameError\n\nlog = logging.getLogger(__name__)\n\n\nclass Event(dict):\n \"\"\"\n Encapsulates events.\n\n :ivar name: the event's name\n :type name: str or unicode\n \"\"\"\n def __init__(self, **kwargs):\n super(Event, self).__init__(kwargs)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __getattr__(self, item):\n return self[item]\n\n\ndef serialize_pusher_event(event):\n \"\"\"\n Convert an event to serialized JSON. Ignores all fields except ``name``, ``data``, and ``channel``.\n\n :param event: the event to serialize\n :type event: Event\n\n :returns: the event as serialized JSON\n :rtype: str\n\n :raises BadEventNameError: if the event has no name set\n \"\"\"\n tmp_event = dict()\n\n tmp_event['event'] = event.get('name')\n if not tmp_event['event']:\n raise BadEventNameError(\"Event name not set\")\n\n # this is to replace e.g. {} with '' but also gracefully handle a nonexistent 'data' key\n tmp_event['data'] = event.get('data') or ''\n\n if event.get('channel'):\n tmp_event['channel'] = event.channel\n\n serialized_event = json.dumps(tmp_event)\n return serialized_event\n\n\ndef load_pusher_event(raw_event):\n \"\"\"\n Load an event from serialized JSON.\n\n :param raw_event: a serialized JSON event\n :type raw_event: str or unicode\n\n :returns: the parsed event\n :rtype: Event\n\n :raise BadEventNameError: if raw_event had no event name field\n \"\"\"\n event = Event(**json.loads(raw_event))\n try:\n event.name = event.pop('event')\n except KeyError:\n raise BadEventNameError(\"No event name\")\n if (event.name.startswith(('pusher:', 'pusher_internal:'))\n and 'data' in event\n and isinstance(event.data, (str, unicode))):\n event.data = json.loads(event.data)\n elif 'data' not in event:\n event.data = dict()\n return event\n\n\n@implementer(IEventEmitter)\nclass EventEmitter(object):\n \"\"\"\n EventEmitter is a widely-used base class that provides an interface to produce and consume named events.\n\n :Example:\n\n >>> x = EventEmitter()\n >>> x.bind_all(lambda event: log.debug(event))\n >>> x.emit_event(Event(name='this_is_an_event'))\n \"\"\"\n def __init__(self):\n \"\"\"\n Simple event dispatching.\n Listeners receive Event objects.\n \"\"\"\n super(EventEmitter, self).__init__()\n self.listeners = defaultdict(set)\n self.global_listeners = set()\n\n def bind(self, event_name, listener):\n \"\"\"\n Bind a listener to a specific event.\n\n :param event_name: name of the event to bind to\n :type event_name: str or unicode\n\n :param listener: function that will receive those events\n\n :raises ValueError: if listener is not callable\n \"\"\"\n if callable(listener):\n self.listeners[event_name].add(listener)\n else:\n raise ValueError(\"Listener must be callable.\")\n\n def unbind(self, event_name, listener):\n \"\"\"\n Unbind a listener from a specific event.\n\n :param event_name: name of the event that the listener was bound to\n :type event_name: str or unicode\n\n :param listener: function that will no longer receive events\n\n :warns: if listener to be removed is not found\n \"\"\"\n try:\n self.listeners[event_name].remove(listener)\n except KeyError:\n warnings.warn(\"Could not unbind 
listener {0} from event '{1}': listener not found.\".format(listener, event_name))\n\n def bind_all(self, listener):\n \"\"\"\n Bind a listener to all events produced.\n\n :param listener: function that will receive all events\n\n :raises ValueError: if listener is not a callable\n \"\"\"\n if callable(listener):\n self.global_listeners.add(listener)\n else:\n raise ValueError(\"Global listener must be a callable.\", listener)\n\n def unbind_all(self, listener):\n \"\"\"\n Unbind a global listener.\n\n :param listener: function that will no longer be called with all events\n\n :warns: if specified global listener to be removed is not found\n \"\"\"\n try:\n self.global_listeners.remove(listener)\n except KeyError:\n warnings.warn(\"Could not unbind global listener '{0}': listener not found.\".format(listener))\n\n def emit_event(self, event):\n \"\"\"\n Dispatch an event to registered listeners. Mostly for internal use.\n\n :param event: event object\n :type event: Event\n \"\"\"\n for cb in chain(self.global_listeners, self.listeners[event.name]):\n try:\n cb(event)\n except AssertionError:\n raise\n except Exception:\n # todo find a better method than this to avoid a listener error killing the transport connection\n warnings.warn(\"Error in listener {} called with event '{}': \\n{}\".format(cb.__name__,\n event.name,\n traceback.format_exc()))","repo_name":"czaanja/twistedpusher","sub_path":"twistedpusher/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"19817542351","text":"from selenium.webdriver.remote.webdriver import WebDriver\n\n\nclass BookingFilterations:\n def __init__(self, driver: WebDriver):\n self.driver = driver\n\n def apply_star_rating(self, *star_values):\n rating_element = self.driver.find_element_by_id(\"filter_class\")\n star_child_elements = rating_element.find_elements_by_css_selector(\"*\")\n\n for star_value in star_values:\n for star_element in star_child_elements:\n if str(star_element.get_attribute('innerHTML')).strip() == f'{star_value} stars':\n star_element.click()\n\n def price_lowest_first(self):\n self.driver.find_element_by_css_selector(\n 'a[data-type=\"price\"]'\n ).click()\n","repo_name":"visith1577/selenium_bot","sub_path":"book/booking_filterations.py","file_name":"booking_filterations.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20770848937","text":"import socket\r\n\r\nHOST = '127.0.0.1'\r\nPORT = 65432\r\n\r\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n s.bind((HOST, PORT))\r\n s.listen() # 5 as usual\r\n conn, addr = s.accept()\r\n with conn:\r\n print('Connected by: ', addr)\r\n while True:\r\n data = conn.recv(1024)\r\n if not data:\r\n break\r\n print('Received: ', data)\r\n # print('conn.send', conn.send(data)) # Returns the number of bytes sent.\r\n print('conn.sendall', conn.sendall(data)) # None is returned on success.\r\n","repo_name":"Sun2yKid/Python-Tricks","sub_path":"socket_demo/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
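Editor's note: the server.py record above is one half of the standard-library echo example; the matching client is not part of this excerpt, but under the same HOST/PORT assumptions it would look like this sketch:

import socket

HOST = '127.0.0.1'  # must match the server above
PORT = 65432

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    s.sendall(b'Hello, world')  # sendall retries until every byte is written
    data = s.recv(1024)

print('Received: ', data)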
{"seq_id":"21234284732","text":"import numpy\nfrom collections import Counter\nfrom matplotlib import pyplot\nfrom matplotlib.font_manager import FontProperties\n\ndef mor_neko():\n morphemes = []\n\n with open('neko.txt.mecab') as f:\n for line in f:\n cols = line.split('\\t')\n\n if len(cols) >= 2:\n info = cols[1].split(',')\n\n morpheme = {\n 'surface': cols[0], # surface form\n 'base': info[6], # base form\n 'pos': info[0], # part of speech\n 'pos1': info[1] # part-of-speech subcategory\n }\n morphemes.append(morpheme)\n\n if cols[0] == '。':\n yield morphemes\n morphemes = []\n\nwords = []\n\nfor line in mor_neko():\n for morpheme in line:\n words.append(morpheme['surface'])\n\nmycounter = Counter(words)\nlabels = []\ndata = []\n\nfor word in mycounter.most_common(10):\n labels.append(word[0])\n data.append(word[1])\n\npyplot.rcParams['font.family'] = 'IPAPGothic'\n#pyplot.rcParams['font.family'] = 'AppleGothic'\n\npyplot.title(\"頻度上位10語\")\npyplot.bar(numpy.array(range(len(data))), data, tick_label=labels, align=\"center\")\npyplot.show()\n","repo_name":"tikogr/NLP100","sub_path":"04/37.py","file_name":"37.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20780670127","text":"\"\"\"\nProcess the picture in the markdown file and upload it to the picture bed, then replace the original url with the new url.\n\"\"\"\n\n# ///////// IMPORT /////////\n\n# ======== Standard Lib ========\nimport os\nimport re\nimport time\nimport urllib.request\n# ======== Third-Party Lib ========\n\n# ======== Local Lib ========\nimport pic_bed_transformer\n# ///////// CONFIG /////////\n# The root folder of saving the pictures\ncache = \"cache\"\n# The root folder of saving the new documents\noutput = \"output\"\n\n\n\n# ///////// CLASS /////////\nclass FilePicTransformer:\n def __init__(self, mode=\"aliyun_oss\", cache_folder=cache, output_folder=output):\n self.pic_bed_transformer = pic_bed_transformer.PicBedTransformer(mode=mode)\n\n self.cache_folder = cache_folder\n self.output_folder = output_folder\n\n self.content = None\n self.pic_urls = None\n\n def set_file(self, file_path):\n self.file_path = file_path\n # read the markdown file\n self.content = read_md_file(self.file_path)\n # get the file name\n self.file_name = os.path.split(self.file_path)[-1]\n # find the picture urls\n self.pic_urls = find_pic_url(self.content)\n pass\n\n def set_cache_folder(self, cache_folder):\n self.cache_folder = cache_folder\n\n def set_output_folder(self, output_folder):\n self.output_folder = output_folder\n\n def clear(self):\n self.content = None\n self.pic_urls = None\n\n def transform(self):\n if self.content is None or self.pic_urls is None:\n raise ValueError(\"The content and the pic_urls should be set first.\")\n\n # Download the pictures\n for i, pic_url in enumerate(self.pic_urls):\n # Get the new url using the time stamp\n local_pic_name = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + \"_\" + str(i) + \".png\"\n local_pic_path = os.path.join(cache, local_pic_name)\n # Download the picture from the url and save it to the local folder\n try:\n new_url = self.pic_bed_transformer.transform(pic_url, local_pic_path)\n # Replace the old url with the new url\n self.content = self.content.replace(pic_url, new_url)\n print(\"Transform the picture %d/%d\" % (i + 1, len(self.pic_urls)))\n except Exception as e:\n print(\"Failed to transform the picture %d/%d\" % (i + 1, len(self.pic_urls)))\n print(e)\n\n # Write the new markdown file\n write_md_file(os.path.join(self.output_folder, self.file_name), self.content)\n\n # print the result\n print(\"Transform the markdown file successfully.\")\n\n\n\n# ///////// UTILS /////////\n# Read the markdown file and return the content\ndef 
read_md_file(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n content = f.read()\n return content\n\n# Write the markdown file\ndef write_md_file(file_path, content):\n with open(file_path, \"w\", encoding=\"utf-8\") as f:\n f.write(content)\n\n# Find the picture url in the markdown file\ndef find_pic_url(content):\n # The pattern of the picture url\n pattern = re.compile(r\"!\\[.*?\\]\\((.*?)\\)\")\n # Find all the picture urls\n pic_urls = pattern.findall(content)\n return pic_urls\n\n# Replace the picture url in the markdown file\ndef replace_pic_urls(content, pic_urls):\n for i, pic_url in enumerate(pic_urls):\n # Get the new url using the time stamp\n new_url = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + \"_\" + str(i) + \".png\"\n # Replace the url\n content = content.replace(pic_url, new_url)\n return content\n\n# Download the picture from the url\ndef download_pic(url, local_file_path):\n # Download the picture\n urllib.request.urlretrieve(url, local_file_path)\n\n# Download the picture from the url without the extension\ndef download_pic_without_extension(url, local_file_path):\n # Download the picture\n urllib.request.urlretrieve(url, local_file_path)\n # Get the extension\n extension = os.path.splitext(local_file_path)[1]\n # Rename the file\n os.rename(local_file_path, local_file_path + extension)\n\n# ///////// TEST CASE /////////\n# ====== Test the read_md_file() ======\ndef test_read_md_file():\n file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进.md\"\n content = read_md_file(file_path)\n print(content)\n\n# ====== Test the find_pic_url() ======\ndef test_find_pic_url():\n file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进.md\"\n content = read_md_file(file_path)\n pic_urls = find_pic_url(content)\n print(pic_urls)\n\n# ====== Test the write_md_file() ======\ndef test_write_md_file():\n file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进.md\"\n new_file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进_bak.md\"\n content = read_md_file(file_path)\n write_md_file(new_file_path, content)\n print(\"test_write_md_file() passed!\")\n\n# ====== Test the download_pic() ======\ndef test_download_pic():\n file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进.md\"\n img_save_path = r\"C:\\Users\\hp\\Desktop\\cache\"\n if not os.path.exists(img_save_path):\n os.makedirs(img_save_path)\n content = read_md_file(file_path)\n pic_urls = find_pic_url(content)\n for i, pic_url in enumerate(pic_urls):\n # Get the picture name using the time stamp\n pic_name = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + \"_\" + str(i) + \".jpg\"\n local_file_path = os.path.join(img_save_path, pic_name)\n download_pic(pic_url, local_file_path)\n print(\"Downloaded the picture: {}\".format(pic_name))\n print(\"test_download_pic() passed!\")\n\n# ====== Test the download_pic_without_extension() ======\ndef test_download_pic_without_extension():\n url = \"https://mmbiz.qpic.cn/sz_mmbiz_png/Mw7QHzQec2hvKrR3DiayMIibicJpcE8nRdfhzeaSII7BQfdlkOvkzbccW8qpicUXR6pGia2iclzlbSOvoCXZqyeMCu2A/640?wx_fmt=png&wxfrom=5&wx_lazy=1&wx_co=1\"\n local_file_path = r\"C:\\Users\\hp\\Desktop\\cache\\test.jpg\"\n # download the picture\n ret = urllib.request.urlretrieve(url, local_file_path)\n print(ret)\n\n# ===== Test the replace_pic_urls() =====\ndef test_replace_pic_urls():\n file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进.md\"\n content = read_md_file(file_path)\n pic_urls = find_pic_url(content)\n content = replace_pic_urls(content, pic_urls)\n new_file_path 
= r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进_bak.md\"\n write_md_file(new_file_path, content)\n\n# ===== Test FilePicTransformer =====\ndef test_FilePicTransformer():\n file_path = r\"C:\\Users\\hp\\Desktop\\2万字长文说清自动驾驶功能架构的演进.md\"\n img_save_path = r\"C:\\Users\\hp\\Desktop\\cache\"\n if not os.path.exists(img_save_path):\n os.makedirs(img_save_path)\n file_save_path = r\"C:\\Users\\hp\\Desktop\\output\"\n fpt = FilePicTransformer(cache_folder=img_save_path, output_folder=file_save_path)\n fpt.set_file(file_path)\n fpt.transform()\n print(\"test_FilePicTransformer() passed!\")\n\nif __name__ == \"__main__\":\n # # ====== Test the read_md_file() ======\n # test_read_md_file()\n\n # # ====== Test the find_pic_url() ======\n # test_find_pic_url()\n\n # # ====== Test the write_md_file() ======\n # test_write_md_file()\n\n # ====== Test the download_pic() ======\n # test_download_pic()\n\n # ====== Test the download_pic_without_extension() ======\n # test_download_pic_without_extension()\n\n # ====== Test the replace_pic_urls() ======\n # test_replace_pic_urls()\n\n # ====== Test FilePicTransformer ======\n test_FilePicTransformer()\n\n pass\n","repo_name":"sun2401060413/SZUtils","sub_path":"scripts/pic_bed_transformer/file_pic_transformer.py","file_name":"file_pic_transformer.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2105205240","text":"# class to perturb input until the label changes\n\nimport numpy as np\nimport torch\nfrom typing import Tuple\nfrom transformers import (\n BertAdapterModel,\n AutoTokenizer,\n PreTrainedTokenizer,\n PreTrainedModel,\n BatchEncoding,\n)\n\nfrom src.calibration.explainers import random_attribution\nfrom src import shap_attributions\nfrom src.calibration.explainers.gradients import simple_grads, integrated_grads\nfrom src.calibration.explainers.attentions import attention, scaled_attention\n\ntorch.manual_seed(4)\ntorch.cuda.manual_seed(4)\nnp.random.seed(4)\n\n\nclass InputReduction:\n def __init__(\n self,\n model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n method: str,\n request: dict,\n ):\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model = model\n self.model.to(self.device)\n self.tokenizer = tokenizer\n self.question = request[\"question\"]\n self.context = request[\"context\"]\n self.top_k = request[\"top_k\"]\n self.mode = request[\"mode\"]\n self.method = method\n\n self.sep_token = (\n self.tokenizer.sep_token\n if self.tokenizer.sep_token is not None\n else self.tokenizer.eos_token\n )\n self.cls_token = (\n self.tokenizer.cls_token\n if self.tokenizer.cls_token is not None\n else self.tokenizer.bos_token\n )\n\n def compute_attributions(self):\n if self.method == \"random\":\n imp_scores = random_attribution.RandomAttributions(\n self.model, self.tokenizer\n )\n attributions = imp_scores.interpret(\n [[self.question, self.context]], self.top_k, output=\"raw\"\n )\n elif self.method == \"attention\":\n grads = attention.AttnAttribution(self.model, self.tokenizer)\n attributions = grads.interpret(\n [[self.question, self.context]], self.top_k, output=\"raw\"\n )\n elif self.method == \"scaled_attn\":\n grads = scaled_attention.ScaledAttention(self.model, self.tokenizer)\n attributions = grads.interpret(\n [[self.question, self.context]], self.top_k, output=\"raw\"\n )\n elif self.method == \"simple_grads\":\n grads = simple_grads.SimpleGradients(self.model, self.tokenizer)\n 
attributions = grads.interpret(\n [[self.question, self.context]], self.top_k, output=\"raw\"\n )\n elif self.method == \"integrated_grads\":\n grads = integrated_grads.IntegratedGradients(self.model, self.tokenizer)\n attributions = grads.interpret(\n [[self.question, self.context]], self.top_k, output=\"raw\"\n )\n # elif self.method == \"smooth_grads\":\n # grads = smooth_grads.SmoothGradients(self.model, self.tokenizer)\n # attributions = grads.interpret([[self.question, self.context]], self.top_k, output=\"raw\")\n elif self.method == \"shap\":\n grads = shap_attributions.SHAPAttributions(\n model=self.model,\n tokenizer=self.tokenizer,\n request=[[self.question, self.context]],\n visualize=False,\n topk=self.top_k,\n )\n attributions = grads.run(output=\"raw\")\n else:\n raise Exception(\"Attribution method not allowed\")\n # print(attributions)\n return attributions\n\n def get_predictions(self):\n enc_inputs, attributions, answer_start, answer_end = self.compute_attributions()\n enc_inputs.to(\"cpu\")\n # print(len(enc_inputs[\"input_ids\"][0]), len(attributions))\n token_ids = enc_inputs[\"input_ids\"]\n sep_idx = token_ids.tolist()[0].index(self.tokenizer.sep_token_id)\n # print(sep_idx)\n # don't mask sep and pad tokens\n mask = (token_ids != self.tokenizer.sep_token_id).long() & (\n token_ids != self.tokenizer.pad_token_id\n ).long()\n instance_attribution = (\n torch.tensor(attributions, dtype=torch.float32).to(self.device)\n + (1 - mask.to(self.device)) * 1e-10\n )\n # get top_k % words\n num_words_to_mask = int((self.top_k / 100) * instance_attribution.size()[1])\n if self.mode == \"max\":\n # get top_k attributions from context; can be changed later to include question too\n topk_idx = torch.topk(\n instance_attribution[0][sep_idx:], num_words_to_mask\n ).indices\n elif self.mode == \"min\":\n topk_idx = torch.topk(\n instance_attribution[0][sep_idx:], num_words_to_mask, largest=False\n ).indices\n # mask top_k tokens from context\n words_to_mask = torch.zeros(instance_attribution.size())\n words_to_mask[0][topk_idx + sep_idx] = 1\n tmp_mask = words_to_mask\n sum_before_mask = torch.sum(tmp_mask[0])\n # unmask answer and sep tokens\n words_to_mask[0][answer_start : answer_end + 1] = 0\n sum_after_mask = torch.sum(words_to_mask[0])\n mask_diff = (sum_before_mask - sum_after_mask).cpu().detach().numpy()\n # if answer tokens are masked, we need to unmask them and mask other tokens\n if mask_diff > 0:\n topk_idx = torch.topk(\n instance_attribution[0][sep_idx:], num_words_to_mask + int(mask_diff)\n ).indices\n # mask top_k tokens from context\n words_to_mask = torch.zeros(instance_attribution.size())\n words_to_mask[0][topk_idx + sep_idx] = 1\n # unmask answer and sep tokens\n words_to_mask[0][answer_start : answer_end + 1] = 0\n\n words_to_mask[0][sep_idx] = 0\n # words_to_mask.to(self.device)\n # mask features\n # print(token_ids, words_to_mask)\n inputs_masked = torch.tensor(\n token_ids * (1 - words_to_mask)\n + words_to_mask * self.tokenizer.mask_token_id,\n dtype=torch.int,\n ).to(self.device)\n # print(inputs_masked)\n dec_inputs = self.tokenizer.decode(\n torch.tensor(inputs_masked[0], dtype=torch.int)\n )\n # print(dec_inputs)\n inputs = dec_inputs.split()\n filter_idx = list(inputs).index(self.sep_token)\n inputs = [\n value if self.sep_token not in value else value.replace(self.sep_token, \"\")\n for value in inputs\n ]\n masked_context = \" \".join(inputs[filter_idx:])\n # print(masked_context)\n output_new = self.question_answering([[self.question, 
masked_context]])\n outputs_old = self.question_answering([[self.question, self.context]])\n\n return outputs_old, output_new\n\n def _ensure_tensor_on_device(self, **inputs):\n \"\"\"\n Ensure PyTorch tensors are on the specified device.\n\n Args:\n inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.\n\n Return:\n :obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.\n \"\"\"\n return {name: tensor.to(self.model.device) for name, tensor in inputs.items()}\n\n def _predict(self, request, batch_size=1) -> Tuple[dict, BatchEncoding]:\n \"\"\"\n Inference on the input.\n\n Args:\n request: the request with the input and optional kwargs\n batch_size: input batch size\n\n Returns:\n The model outputs and optionally the input features\n \"\"\"\n all_predictions = []\n # self.model.to(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n features = self.tokenizer(\n request, return_tensors=\"pt\", padding=True, truncation=True, max_length=512\n )\n for start_idx in range(0, len(request), batch_size):\n with torch.no_grad():\n input_features = {\n k: features[k][start_idx : start_idx + batch_size]\n for k in features.keys()\n }\n input_features = self._ensure_tensor_on_device(**input_features)\n predictions = self.model(**input_features)\n all_predictions.append(predictions)\n keys = all_predictions[0].keys()\n final_prediction = {}\n for key in keys:\n if isinstance(all_predictions[0][key], tuple):\n tuple_of_lists = list(\n zip(\n *[\n [\n torch.stack(p).to(self.device)\n if isinstance(p, tuple)\n else p.to(self.device)\n for p in tpl[key]\n ]\n for tpl in all_predictions\n ]\n )\n )\n final_prediction[key] = tuple(torch.cat(l) for l in tuple_of_lists)\n else:\n final_prediction[key] = torch.cat(\n [p[key].to(self.device) for p in all_predictions]\n )\n # print(final_prediction)\n # print(features)\n return final_prediction, features\n\n def question_answering(self, request):\n \"\"\"\n Span-based question answering for a given question and context.\n We expect the input to use the (question, context) format for the text pairs.\n\n Args:\n request: the prediction request\n\n \"\"\"\n\n def decode(\n start_: np.ndarray,\n end_: np.ndarray,\n topk: int,\n max_answer_len: int,\n undesired_tokens_: np.ndarray,\n ) -> Tuple:\n \"\"\"\n Take the output of any :obj:`ModelForQuestionAnswering` and will generate probabilities\n for each span to be the actual answer.\n\n In addition, it filters out some unwanted/impossible cases like answer len being greater\n than max_answer_len or answer end position being before the starting position. 
The method\n            supports outputting the k-best answers through the topk argument.\n\n            Args:\n                start_ (:obj:`np.ndarray`): Individual start probabilities for each token.\n                end_ (:obj:`np.ndarray`): Individual end probabilities for each token.\n                topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.\n                max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.\n                undesired_tokens_ (:obj:`np.ndarray`): Mask determining tokens that can be part of the answer\n            \"\"\"\n            # Ensure we have batch axis\n            if start_.ndim == 1:\n                start_ = start_[None]\n\n            if end_.ndim == 1:\n                end_ = end_[None]\n\n            # Compute the score of each tuple(start_, end_) to be the real answer\n            outer = np.matmul(np.expand_dims(start_, -1), np.expand_dims(end_, 1))\n\n            # Remove candidates with end_ < start_ or end_ - start_ > max_answer_len\n            candidates = np.tril(np.triu(outer), max_answer_len - 1)\n\n            # Inspired by Chen et al. (https://github.com/facebookresearch/DrQA)\n            scores_flat = candidates.flatten()\n            if topk == 1:\n                idx_sort = [np.argmax(scores_flat)]\n            elif len(scores_flat) < topk:\n                idx_sort = np.argsort(-scores_flat)\n            else:\n                idx = np.argpartition(-scores_flat, topk)[0:topk]\n                idx_sort = idx[np.argsort(-scores_flat[idx])]\n\n            starts_, ends_ = np.unravel_index(idx_sort, candidates.shape)[1:]\n            desired_spans = np.isin(starts_, undesired_tokens_.nonzero()) & np.isin(\n                ends_, undesired_tokens_.nonzero()\n            )\n            starts_ = starts_[desired_spans]\n            ends_ = ends_[desired_spans]\n            scores_ = candidates[0, starts_, ends_]\n\n            return starts_, ends_, scores_\n\n        predictions, features = self._predict(request)\n        # print(predictions, request)\n        task_outputs = {\"answers\": []}\n        for idx, (start, end, (_, context)) in enumerate(\n            zip(predictions[\"start_logits\"], predictions[\"end_logits\"], request)\n        ):\n            start = start.cpu().detach().numpy()\n            end = end.cpu().detach().numpy()\n            # Ensure padded tokens & question tokens cannot belong to the set of candidate answers.\n            question_tokens = np.abs(\n                np.array([s != 1 for s in features.sequence_ids(idx)]) - 1\n            )\n            # Unmask CLS token for 'no answer'\n            question_tokens[0] = 1\n            undesired_tokens = question_tokens & features[\"attention_mask\"][idx].numpy()\n\n            # Generate mask\n            undesired_tokens_mask = undesired_tokens == 0.0\n\n            # Make sure non-context indexes in the tensor cannot contribute to the softmax\n            start = np.where(undesired_tokens_mask, -10000.0, start)\n            end = np.where(undesired_tokens_mask, -10000.0, end)\n\n            start = np.exp(\n                start - np.log(np.sum(np.exp(start), axis=-1, keepdims=True))\n            )\n            end = np.exp(end - np.log(np.sum(np.exp(end), axis=-1, keepdims=True)))\n\n            # print(start, end)\n\n            # Get score for 'no answer' then mask for decoding step (CLS token)\n            no_answer_score = (start[0] * end[0]).item()\n            start[0] = end[0] = 0.0\n\n            starts, ends, scores = decode(start, end, 1, 128, undesired_tokens)\n            enc = features[idx]\n            answers = [\n                {\n                    \"score\": score.item(),\n                    \"start\": enc.word_to_chars(enc.token_to_word(s), sequence_index=1)[\n                        0\n                    ],\n                    \"end\": enc.word_to_chars(enc.token_to_word(e), sequence_index=1)[1],\n                    \"answer\": context[\n                        enc.word_to_chars(enc.token_to_word(s), sequence_index=1)[\n                            0\n                        ] : enc.word_to_chars(enc.token_to_word(e), sequence_index=1)[1]\n                    ].lower(),\n                }\n                for s, e, score in zip(starts, ends, scores)\n            ]\n            answers.append(\n                {\"score\": no_answer_score, \"start\": 0, \"end\": 0, \"answer\": \"\"}\n            )\n            answers = sorted(answers, key=lambda x: x[\"score\"], reverse=True)[:1]\n            
task_outputs[\"answers\"].append(answers)\n return task_outputs\n\n\nif __name__ == \"__main__\":\n base_model = \"bert-base-uncased\"\n adapter_model = \"AdapterHub/bert-base-uncased-pf-squad_v2\"\n model = BertAdapterModel.from_pretrained(base_model)\n tokenizer = AutoTokenizer.from_pretrained(base_model)\n adapter_name = model.load_adapter(adapter_model, source=\"hf\")\n model.active_adapters = adapter_name\n\n question, context = (\n \"Who patronized the monks in Italy?\",\n \"At Saint Evroul, a tradition of singing had developed and the choir achieved fame in Normandy. Under the Norman abbot Robert de Grantmesnil, several monks of Saint-Evroul fled to southern Italy, where they were patronised by Robert Guiscard and established a Latin monastery at Sant'Eufemia. There they continued the tradition of singing.\",\n )\n reduce = InputReduction(\n model,\n tokenizer,\n request={\"question\": question, \"context\": context, \"top_k\": 5, \"mode\": \"max\"},\n method=\"scaled_attn\",\n )\n resp = reduce.get_predictions()\n print(resp)\n\n # dataloader = PreprocessData(\"squad_v2\", \"squad_v2\", save_data=False, save_path=\"../../\")\n # outputs = list()\n # count = 0\n # for ex in tqdm(dataloader.processed_val_set()):\n # # print(ex)\n # result = dict()\n # reduce = InputReduction(model,\n # tokenizer,\n # request={\n # \"question\": ex[\"question\"],\n # \"context\": ex[\"context\"],\n # \"top_k\": 5,\n # \"mode\": \"max\"\n # },\n # method=\"shap\")\n # resp = reduce.get_predictions()\n # print(resp)\n # count += 1\n # if count == 2:\n # break\n","repo_name":"UKPLab/CATfOOD","sub_path":"src/calibration/baseline/input_reduction.py","file_name":"input_reduction.py","file_ext":"py","file_size_in_byte":16575,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"38262823926","text":"from collections import namedtuple\nfrom PyQt5.QtGui import QColor\n\npos_tags = {\n \"CC\": \"Coordinating conjunction\",\n \"CD\": \"Cardinal number\",\n \"DT\": \"Determiner\",\n \"EX\": \"Existential there\",\n \"FW\": \"Foreign word\",\n \"IN\": \"Preposition or subordinating conjunction\",\n \"JJ\": \"Adjective\",\n \"JJR\": \"Adjective, comparative\",\n \"JJS\": \"Adjective, superlative\",\n \"LS\": \"List item marker\",\n \"MD\": \"Modal verb\",\n \"NN\": \"Noun, singular or mass\",\n \"NNS\": \"Noun, plural\",\n \"NNP\": \"Proper noun, singular\",\n \"NNPS\": \"Proper noun, plural\",\n \"PDT\": \"Predeterminer\",\n \"POS\": \"Possessive ending\",\n \"PRP\": \"Personal pronoun\",\n \"PRP$\": \"Possessive pronoun\",\n \"RB\": \"Adverb\",\n \"RBR\": \"Adverb, comparative\",\n \"RBS\": \"Adverb, superlative\",\n \"RP\": \"Particle\",\n \"SYM\": \"Symbol\",\n \"TO\": \"The word 'to'\",\n \"UH\": \"Interjection\",\n \"VB\": \"Verb, base form\",\n \"VBD\": \"Verb, past tense\",\n \"VBG\": \"Verb, gerund or present participle\",\n \"VBN\": \"Verb, past participle\",\n \"VBP\": \"Verb, non-3rd person singular present\",\n \"VBZ\": \"Verb, 3rd person singular present\",\n \"WDT\": \"Wh-determiner\",\n \"WP\": \"Wh-pronoun\",\n \"WP$\": \"Possessive wh-pronoun\",\n \"WRB\": \"Wh-adverb\"\n}\n\n# Define the namedtuple for 256-color ANSI color codes\nColorCodes256 = namedtuple(\"ColorCodes256\", [\n \"RED\", \"ORANGE\", \"YELLOW\", \"GREEN\", \"CYAN\", \"LIGHT_BLUE\", \"BLUE\", \"PURPLE\",\n \"DARK_RED\", \"DARK_ORANGE\", \"DARK_YELLOW\", \"DARK_GREEN\", \"DARK_CYAN\", \"DARK_LIGHT_BLUE\", \"DARK_BLUE\", \"DARK_PURPLE\",\n \"BRIGHT_RED\", \"BRIGHT_ORANGE\", 
\"BRIGHT_YELLOW\", \"BRIGHT_GREEN\", \"BRIGHT_CYAN\", \"BRIGHT_LIGHT_BLUE\", \"BRIGHT_BLUE\", \"BRIGHT_PURPLE\",\n \"PASTEL_RED\", \"PASTEL_ORANGE\", \"PASTEL_YELLOW\", \"PASTEL_GREEN\", \"PASTEL_CYAN\", \"PASTEL_LIGHT_BLUE\", \"PASTEL_BLUE\", \"PASTEL_PURPLE\",\n \"GRAY1\", \"GRAY2\", \"GRAY3\", \"GRAY4\"\n])\n\n# Initialize the namedtuple with selected ANSI color codes for 256-color mode\ncolor_codes_256 = ColorCodes256(\n 196, 202, 226, 46, 51, 39, 21, 127,\n 160, 166, 190, 34, 39, 27, 18, 90,\n 203, 209, 229, 47, 51, 39, 21, 128,\n 203, 214, 227, 48, 52, 40, 22, 129,\n 236, 239, 242, 245\n)\n\ndef ansi_to_qcolor(ansi_code):\n if 0 <= ansi_code <= 15:\n # 16 basic colors\n table = [\n 0x000000, 0x800000, 0x008000, 0x808000, 0x000080, 0x800080, 0x008080, 0xc0c0c0,\n 0x808080, 0xff0000, 0x00ff00, 0xffff00, 0x0000ff, 0xff00ff, 0x00ffff, 0xffffff\n ]\n return QColor(table[ansi_code])\n elif 16 <= ansi_code <= 231:\n # 216 colors (6x6x6 RGB color cube)\n ansi_code -= 16\n r = ansi_code // 36\n g = (ansi_code % 36) // 6\n b = ansi_code % 6\n return QColor(r * 51, g * 51, b * 51)\n elif 232 <= ansi_code <= 255:\n # 24 grayscale colors\n level = (ansi_code - 232) * 10 + 8\n return QColor(level, level, level)\n else:\n raise ValueError(f\"Invalid ANSI color code: {ansi_code}\")\n\ncolor_codes_256 = [\n 196, 202, 226, 46, 51, 39, 21, 127,\n 160, 166, 190, 34, 39, 27, 18, 90,\n 203, 209, 229, 47, 51, 39, 21, 128,\n 203, 214, 227, 48, 52, 40, 22, 129,\n 236, 239, 242, 245\n]\n\nqcolors_temp = [ansi_to_qcolor(ansi_code) for ansi_code in color_codes_256]\nqcolors = ColorCodes256(*qcolors_temp)\nprint(qcolors)\npos_tag_colors = dict(zip(pos_tags.keys(), qcolors))\n# print(pos_tag_colors)","repo_name":"veyronvenom1200/code","sub_path":"python/WordAnalyzer/wordanalysis/colorcode.py","file_name":"colorcode.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7822415487","text":"BATCH_SIZE = 150\nEPOCHS = 100\nWORKERS = 0\nLEARNING_RATE = 0.001\nWEIGHT_DECAY = 0.0001\n\nTRAIN_FACE_DIR = ['data/face']\nTRAIN_NO_FACE_DIR = ['data/no_face']\n\nTEST_FACE_DIR = ['test/face']\nTEST_NO_FACE_DIR = ['test/no_face']\n","repo_name":"caprapaul/assignments","sub_path":"semester_4/ai/Assignment8/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7693717901","text":"def create_hero():\n name = input(\"请输入英雄的名称:\")\n blood = input(\"请输入英雄的血量:\")\n power = input(\"请输入英雄的攻击力:\")\n hero = dict(name=name, blood=blood, power=power)\n li_hero.append(hero)\n print(\"创建成功\")\ndef query_hero():\n name1 = input(\"请输入需要查询的英雄信息: \")\n ff = False\n for j in li_hero:\n\n if name1 == j.get('name'):\n ff = True\n print(\"英雄 %s 的信息为\" % name1)\n print(j)\n if ff == False:\n print(\"没有英雄 %s 的信息\" % name1)\ndef update_hero():\n name3 = input(\"请问你要修改哪个英雄的血量 \")\n ff = False\n for j in li_hero:\n\n if name3 == j.get('name'):\n li_hero.pop(li_hero.index(j))\n blood1 = input(\"请问你要将血量修改为多少? \")\n j['blood'] = blood1;\n li_hero.append(j)\n ff = True\n print(\"更新之后的结果为\")\n print(li_hero)\n break\n if ff == False:\n print(\"更新之后的结果为没有找到。\")\ndef delete_hero():\n name2 = input(\"请问你要删除哪个英雄? \")\n ff = False\n for j in li_hero:\n if name2 == j.get('name'):\n li_hero.remove(j)\n ff = True\n print(\"删除之后所有的英雄的数据信息为 \")\n print(li_hero)\n if ff == False:\n print(\"更新之后的结果为没有找到。\")\nStr_1=\"1. 
**Create hero** Create a hero character in the current game, defining the hero's HP and attack power.\"\nStr_2=\"2. **View hero info** View the info of every hero in the current game.\"\nStr_3=\"3. **Update hero info** Modify a hero's HP.\"\nStr_4=\"4. **Delete hero** The hero is too weak and no longer needed; delete it.\"\nStr_5=\"5. **Exit system** End the program.\"\nprint(Str_1+\"\\n\"+Str_2+\"\\n\"+Str_3+\"\\n\"+Str_4+\"\\n\"+Str_5)\n\nli_input = [\"1\",\"2\",\"3\",\"4\",\"5\"]\nli_hero=[]\ni=\"1\"\n\n\nwhile i in li_input:\n    i = input(\"Enter a number to choose the operation to perform: \")\n    if i == \"1\":\n        create_hero()\n        continue\n    if i == \"2\":\n        query_hero()\n        continue\n    if i == \"3\":\n        update_hero()\n        continue\n    if i == \"4\":\n        delete_hero()\n        continue\n    if i == \"5\":\n        break\nprint(\"Exiting system\")","repo_name":"goodwinatm/python_learn","sub_path":"英雄管理.py","file_name":"英雄管理.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15772382306","text":"from typing import List\nimport math\nfrom heapq import heappush as hpush\nfrom heapq import heappop as hpop\n\n\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def __init__(self):\n        pass\n\n    def minimumTotal(self, triangle: List[List[int]]) -> int:\n        min_sum = math.inf\n        n = len(triangle)\n        memo = dict()\n        def get_minimum(index, path_sum, depth):\n            nonlocal min_sum, triangle, n, memo\n\n            if (depth, index) in memo:\n                return memo[(depth, index)]\n\n            if depth == n:\n                return path_sum\n\n            for i in [index, index + 1]:\n                val = triangle[depth][i]\n                min_value = get_minimum(i, path_sum + val, depth + 1)\n                min_sum = min(min_sum, min_value)\n            memo[(depth, index)] = min_sum\n            return min_sum\n\n\n        get_minimum(0, triangle[0][0], 1)\n        # print(memo)\n        return min_sum if min_sum != math.inf else triangle[0][0]\n\n\nindex = 0\nfor _input, expected_output in \\\n    [\n        # ([[2],[3,4],[6,5,7],[4,1,8,3]], 11),\n        # ([[-10]], -10),\n        ([[-7],[-2,1],[-5,-5,9],[-4,-5,4,4],[-6,-6,2,-1,-5],[3,7,8,-3,7,-9],[-9,-1,-9,6,9,0,7],[-7,0,-6,-8,7,1,-4,9],[-3,2,-6,-9,-7,-6,-9,4,0],[-8,-6,-3,-9,-2,-6,7,-5,0,7],[-9,-1,-2,4,-2,4,4,-1,2,-5,5],[1,1,-6,1,-2,-4,4,-2,6,-6,0,6],[-3,-3,-6,-2,-6,-2,7,-9,-5,-7,-5,5,1]], -63),\n    ]:\n    s = Solution()\n    index += 1\n    actual_output = s.minimumTotal(_input)\n    assert actual_output == expected_output, \"tc{} failed => {}\".format(index, actual_output)\nelse:\n    print('\\n*** All tests passed successfully! 
***')\n","repo_name":"sachinsfo/leetcode_explore","sub_path":"202104_April/21_LC120_MinPathSumTriangle.py","file_name":"21_LC120_MinPathSumTriangle.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35730409597","text":"import datetime\n\nfrom pymongo import MongoClient\nfrom sqlalchemy import Table\n\n\ndef get_headers(table: Table):\n    return [str(col).split(\".\")[1] for col in table._columns]\n\n\ndef normalize(entity):\n    if type(entity) == datetime.date:\n        return datetime.datetime.combine(entity, datetime.datetime.min.time())\n    return entity\n\n\ndef get_data_dict(data, headers):\n    res = []\n    for dat in data:\n        res.append({headers[i]: normalize(dat[i]) for i, _ in enumerate(dat)})\n    return res\n\n\ndef handle_mongo_insert(tables, data, client: MongoClient):\n    for i, _ in enumerate(tables):\n        headers = get_headers(tables[i])\n        d = get_data_dict(data[i], headers)\n        collection = client[\"cluster0\"][tables[i].name]\n        try:\n            res = collection.insert_many(d)\n        except Exception as e:\n            print(e)\n","repo_name":"mainak-affinsys/pymigrate","sub_path":"pymigrate/databases/mongo_handler.py","file_name":"mongo_handler.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"24440019604","text":"import itertools\n\nwhile True:\n    i = list(map(int, input().split()))\n    if i == [0]:\n        break\n    k, S = i[0], i[1:]\n    combos = itertools.combinations(S, 6)\n    for combo in combos:\n        print(*combo)\n\n    print()","repo_name":"bassyu/ps","sub_path":"BOJ/06603.py","file_name":"06603.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33969965998","text":"import json\n\nusername = input('What is your name? ').title()\n\nfilename = 'username.json'\nwith open(filename, 'w') as file_object:\n    \"\"\"Store the username entered above in username.json with json.dump(), so\n    a companion script can read it back later with json.load(). 
Then we can\n    retrieve the user's name and welcome them back.\"\"\"\n    json.dump(username, file_object)\n    print(f\"We'll remember you when you come back, {username}!\")","repo_name":"carlosrjhoe/Python","sub_path":"Livro_Curso_Intensivo_de_Python/Arquivos_Excessoes/Armazenando_dados/salvando_lendo_dados_do_usuario.py","file_name":"salvando_lendo_dados_do_usuario.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28004783396","text":"import numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\ndef plot_trend(x, y):\n    \"\"\" Plot 1D data \"\"\"\n    #change string to float in y\n#    for i in y:\n#        if i =='NULL':\n    \n    fig = plt.figure()\n    fig.suptitle('pm25 trend from 2014-05-01 to 2015-04-30', fontsize = 14, fontweight='bold')\n    \n    ax = fig.add_subplot(1,1,1)\n    ax.set_xlabel(\"time\")\n    ax.set_ylabel(\"pm25\")\n    ax.set_xlim(735354,xmax=max(x))\n    \n    ax.xaxis.set_major_locator(mdates.DayLocator(bymonthday=range(1,32),interval=15))\n    ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%Y-%m-%d\"))\n    for label in ax.xaxis.get_ticklabels():\n        label.set_rotation(45)\n    \n    ax.plot(x,y)\n    plt.show()\n    \nif __name__=='__main__':\n    filename = \"/root/workspace/airpollution/airquality.csv\"\n    #Station ID	Time	PM25	PM10	NO2	CO	O3	SO2\n    #001001,2014-05-01 00:00:00,138,159.4,56.3,0.9,50.8,17.2\n    \n    st_id, dates, PM25, PM10, NO2, CO, O3, SO2 = np.genfromtxt(filename,delimiter=\",\", skip_header =1,unpack= True, missing_values=\"NULL\", filling_values=np.nan, converters={1:mdates.strpdate2num('%Y-%m-%d %H:%M:%S')})\n    plot_trend(dates, PM25)","repo_name":"pkuazi/pm25","sub_path":"datavis.py","file_name":"datavis.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"13443775825","text":"from http import HTTPStatus\nfrom typing import Annotated\n\nfrom fastapi import APIRouter, Depends, HTTPException\n\nimport app.db.models as db\nfrom app.api.api_v1.dependencies import (\n    BlockSearchParameters,\n    ListParametersDecimal,\n    UnicodeBlockPathParamResolver,\n    UnicodePlaneResolver,\n)\nfrom app.api.api_v1.pagination import paginate_search_results\nfrom app.core.config import get_settings\nfrom app.data.cache import cached_data\n\nrouter = APIRouter()\n\n\n@router.get(\"\", response_model=db.PaginatedList[db.UnicodeBlockResponse], response_model_exclude_unset=True)\ndef list_all_unicode_blocks(\n    list_params: Annotated[ListParametersDecimal, Depends()], plane: Annotated[UnicodePlaneResolver, Depends()]\n):\n    (start, stop) = get_block_list_endpoints(list_params, plane)\n    return {\n        \"url\": f\"{get_settings().API_VERSION}/blocks\",\n        \"has_more\": stop <= plane.finish_block_id,\n        \"data\": [cached_data.get_unicode_block_by_id(id).as_response() for id in range(start, stop)],\n    }\n\n\n@router.get(\n    \"/search\",\n    response_model=db.PaginatedSearchResults[db.UnicodeBlockResult],\n    response_model_exclude_unset=True,\n)\ndef search_unicode_blocks_by_name(\n    search_params: Annotated[BlockSearchParameters, Depends()],\n):\n    params = {\n        \"url\": f\"{get_settings().API_VERSION}/blocks/search\",\n        \"query\": search_params.name,\n    }\n    results = cached_data.search_blocks_by_name(search_params.name, search_params.min_score)\n    if not results:\n        return params | {\n            \"current_page\": 0,\n            \"total_results\": 0,\n            \"has_more\": False,\n            \"results\": [],\n        }\n    block_ids = [block_id 
for (block_id, _) in results]\n paginate_result = paginate_search_results(block_ids, search_params.per_page, search_params.page)\n if paginate_result.failure:\n raise HTTPException(status_code=int(HTTPStatus.BAD_REQUEST), detail=paginate_result.error)\n paginated = paginate_result.value if paginate_result.value else {}\n start = paginated.pop(\"start\", 0)\n end = paginated.pop(\"end\", 0)\n paginated[\"results\"] = [\n cached_data.get_unicode_block_by_id(block_id).as_search_result(score)\n for (block_id, score) in results[start:end]\n ]\n return params | paginated\n\n\n@router.get(\n \"/{name}\",\n response_model=db.UnicodeBlockResponse,\n response_model_exclude_unset=True,\n)\ndef get_unicode_block_details(name: Annotated[UnicodeBlockPathParamResolver, Depends()]):\n return name.block.as_response()\n\n\ndef get_block_list_endpoints(list_params: ListParametersDecimal, plane: UnicodePlaneResolver) -> tuple[int, int]:\n start = plane.start_block_id\n if list_params.starting_after:\n start = list_params.starting_after + 1\n if list_params.ending_before:\n start = list_params.ending_before - list_params.limit\n stop = min(plane.finish_block_id + 1, start + list_params.limit)\n if start >= plane.start_block_id and start <= plane.finish_block_id:\n return (start, stop)\n raise HTTPException(\n status_code=int(HTTPStatus.BAD_REQUEST),\n detail=(\n f\"The starting block id ({start}) is not within the range of blocks which comprise the \"\n f\"specified Unicode plane ({plane.plane.name}): first block: {plane.start_block_id}, \"\n f\"last block: {plane.finish_block_id}\"\n ),\n )\n","repo_name":"a-luna/unicode-api","sub_path":"app/api/api_v1/endpoints/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31412227915","text":"import base64\nimport argparse\nimport googleapiclient.discovery\nimport google.auth\nimport datetime\n\n# [START iam_create_key]\ndef create_key(service_account_email):\n \"\"\"Creates a key for a service account.\"\"\"\n\n credentials, project_id = google.auth.default()\n #project_id = service_account_email.split('.')[0].split('@')[1]\n service = googleapiclient.discovery.build(\n 'iam', 'v1', credentials=credentials)\n\n key = service.projects().serviceAccounts().keys().create(\n name='projects/-/serviceAccounts/' + service_account_email, body={}\n ).execute()\n\n json_key_file = base64.b64decode(key['privateKeyData']).decode('utf-8')\n\n# [END iam_create_key]\n\n# [START iam_list_keys]\ndef list_keys(service_account_email):\n \"\"\"Lists all keys for a service account.\"\"\"\n\n credentials, project_id = google.auth.default()\n\n service = googleapiclient.discovery.build(\n 'iam', 'v1', credentials=credentials)\n\n keys = service.projects().serviceAccounts().keys().list(\n name='projects/-/serviceAccounts/' + service_account_email, keyTypes=\"USER_MANAGED\").execute()\n\n for key in keys['keys']:\n print(key['name'])\n# [END iam_list_keys]\n\n# [START delete_expired_keys]\ndef delete_expired_keys(service_account_email):\n \"\"\"Delete expired keys for a service account.\"\"\"\n credentials, project_id = google.auth.default()\n service = googleapiclient.discovery.build(\n 'iam', 'v1', credentials=credentials)\n\n keys = service.projects().serviceAccounts().keys().list(\n name='projects/-/serviceAccounts/' + service_account_email, keyTypes=\"USER_MANAGED\").execute()\n\n for key in keys['keys']:\n keyname = key['name']\n expiration_date = 
key['validBeforeTime']\n expiration_datetime = datetime.datetime.strptime(expiration_date, '%Y-%m-%dT%H:%M:%SZ')\n now = datetime.datetime.now()\n days_until_expiration = (expiration_datetime - now).days\n if days_until_expiration < 0:\n service.projects().serviceAccounts().keys().delete(\n name=keyname).execute()\n\n# [START iam_delete_key]\ndef delete_key(full_key_name):\n \"\"\"Deletes a service account key.\"\"\"\n\n credentials, project_id = google.auth.default()\n\n service = googleapiclient.discovery.build(\n 'iam', 'v1', credentials=credentials)\n\n service.projects().serviceAccounts().keys().delete(\n name=full_key_name).execute()\n\n print('Deleted key: ' + full_key_name)\n# [END iam_delete_key]\n\ndef main():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n subparsers = parser.add_subparsers(dest='command')\n\n create_key_parser = subparsers.add_parser(\n 'create', help=create_key.__doc__)\n create_key_parser.add_argument('service_account_email')\n\n list_keys_parser = subparsers.add_parser(\n 'list', help=list_keys.__doc__)\n list_keys_parser.add_argument('service_account_email')\n\n delete_key_parser = subparsers.add_parser(\n 'delete', help=delete_key.__doc__)\n delete_key_parser.add_argument('full_key_name')\n\n delete_keys_parser = subparsers.add_parser(\n 'delete_expired_keys', help=delete_key.__doc__)\n delete_keys_parser.add_argument('service_account_email')\n\n args = parser.parse_args()\n\n if args.command == 'list':\n list_keys(args.service_account_email)\n elif args.command == 'create':\n create_key(args.service_account_email)\n elif args.command == 'delete':\n delete_key(args.full_key_name)\n elif args.command == 'delete_expired_keys':\n delete_expired_keys(args.service_account_email)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"jasonbisson/terraform-google-service-account-keys","sub_path":"files/service_account_keys.py","file_name":"service_account_keys.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35468672685","text":"#!/usr/bin/python\n\nfrom itertools import count\nimport sys, getopt, re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.gridspec as gridspec\n\ncolorarray = ['brown','green','fuchsia','orange','blue','lime']\n\ndef parseopt(argv):\n path=\"\"\n try:\n opts, args = getopt.getopt(argv,\"hp:\",[\"path=\"])\n except getopt.GetoptError:\n print ('show_flowfield.py -p ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('show_flowfield.py -p ')\n sys.exit()\n elif opt in (\"-p\"):\n path = arg\n print ('input path is',path)\n else:\n print ('show_flowfield.py -p ')\n sys.exit()\n return path\n\ndef readfile(path):\n lines = []\n #print(path) \n with open(str(path)) as f:\n lines = f.readlines()\n result = []\n count = 1\n M = 0\n N = 0\n U = []\n V = []\n P = []\n for line in lines:\n if count == 1:\n SZ = line.split(' ')\n M = int(SZ[0])\n N = int(SZ[1])\n elif (count>=3)&(count<=(2+N+1)): # U\n Ui = line.split(', ')[0:-1]\n U.append([float(i) for i in Ui])\n elif (count>=2+N+4)&(count<=(2+2*N+3)): # V\n Vi = line.split(', ')[0:-1]\n V.append([float(i) for i in Vi])\n elif (count>=2+2*N+6)&(count<=(2+3*N+5)): # P\n Pi = line.split(', ')[0:-1]\n P.append([float(i) for i in Pi])\n count = count+1\n return M, N, U, V, P\n\ndef plotStream(M,N,U,V):\n X = np.arange(0.5/N,1,1/N)\n Y = 
np.arange(0.5/M,1,1/M)\n    gY, gX = np.mgrid[0:(N+0.5)/N:1/N,0:(M+0.5)/M:1/M]\n    theU = np.zeros((N+1,M+1))\n    theV = np.zeros((N+1,M+1))\n    for i in range(0,N+1):\n        #print(theU[i,:])\n        #print(np.size(gX[0,:]))\n        #print(X)\n        #print(U[i][:])\n        theU[i,:] = np.interp(gX[0,:],X,U[i][:])\n    for i in range(0,M+1):\n        theV[:,i] = np.interp(gY[:,0],Y,np.array(V)[:,i])\n    #print(theU)\n    #print(theV)\n    mag = np.power((np.power(theU.transpose(),2)+np.power(theV.transpose(),2)),0.5)\n    fig = plt.figure(figsize=(7, 9))\n    gs = gridspec.GridSpec(nrows=1, ncols=1)\n    ax1 = fig.add_subplot(gs[0, 0])\n    strm = ax1.streamplot(gX, gY, theU.transpose(), theV.transpose(), color=mag, linewidth=2, cmap='RdYlBu')\n    ax2 = fig.colorbar(strm.lines)\n    ax1.set_title('Velocity Stream Line')\n    ax1.set_xlabel('X position(%)')\n    ax1.set_ylabel('Y Position(%)')\n    ax2.ax.set_ylabel('Velocity Magnitude (m/s)')\n    plt.tight_layout()\n    plt.show()\n\ndef plotPressure(M,N,P):\n    X = np.arange(0.5/N,1,1/N)\n    Y = np.arange(0.5/M,1,1/M)\n    gX, gY = np.meshgrid(X,Y)\n    #print(np.shape(gX))\n    #print(np.shape(P))\n    #print(P)\n    #print(np.array(P)[:,0:M])\n    fig = plt.figure()\n    ax1 = fig.add_subplot()\n    ctr = ax1.contourf(gX, gY, np.array(P)[:,0:M].transpose(), 10, alpha=1)\n    ax2 = fig.colorbar(ctr)\n    ax1.set_title('Pressure Contour')\n    ax1.set_xlabel('X position(%)')\n    ax1.set_ylabel('Y Position(%)')\n    ax2.ax.set_ylabel('Pressure Magnitude (Pa)')\n    plt.show()\n\nif __name__ == \"__main__\":\n    path = parseopt(sys.argv[1:])\n    M, N, U, V, P = readfile(path)\n    plotStream(M,N,U,V)\n    plotPressure(M,N,P)\n    #print(result)\n    #plotrect(result)\n\n","repo_name":"TieneSabor/SIMPLER","sub_path":"script/show_flowfield.py","file_name":"show_flowfield.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20096366231","text":"import sys\nimport pygame\n\nfrom settings import Settings\nimport game_functions as gf\n\ndef run_game():\n    # Initialize and create game screen\n    pygame.init()\n    settings = Settings() # Set settings\n    screen = pygame.display.set_mode( (settings.screen_width, settings.screen_height) ) # Initialize screen\n    pygame.display.set_caption(\"Sudoku\")\n    unsolved_grid = gf.read_file(settings.solve_dir + \"grid.txt\") # Get unsolved grid\n    solved_grid = gf.solve_grid(settings, settings.solve_dir + \"solved_puzzle.txt\") # Solve grid, then get solved grid\n\n    # Main loop for game\n    while True:\n        gf.check_events(settings, screen)\n        if (settings.solved == True):\n            gf.update_screen(settings, screen, solved_grid)\n        else:\n            gf.update_screen(settings, screen, unsolved_grid)\n\nrun_game()\n","repo_name":"bzeeno/Sudoku-Solver","sub_path":"GUI/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"19334808482","text":"# coding:utf-8\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport vgg_train\nimport utils\nfrom Nclasses import labels\n\n# img_path = raw_input('Input the path and image name:')\nimg_path = input('Input the path and image name:')\n# Preprocess the image to be classified with our own load_image\nimg_ready = utils.load_image(img_path)\nprint(\"img_ready shape\", tf.Session().run(tf.shape(img_ready)))\n\nfig = plt.figure(u\"Top-5 prediction results\")\n\nwith tf.Session() as sess:\n    images = tf.placeholder(tf.float32, [1, 224, 224, 3])\n    # Instantiate vgg\n    vgg = vgg_train.Vgg16() # Run the Vgg class initializer, which reads the model parameters saved in the npy file\n    # Run the forward-pass function to rebuild the network structure\n    
vgg.forward(images)\n    # Feed the image to be classified into vgg.prob, the node that computes the softmax\n    probability = sess.run(vgg.prob, feed_dict={images: img_ready})\n    # Store in top5 the indices of the 5 highest-probability entries in the probability list\n    top5 = np.argsort(probability[0])[-1:-6:-1]\n    print(\"top5:\", top5)\n    values = [] # New list to hold the probability values\n    bar_label = [] # New list to hold the corresponding labels, i.e. the species names\n    for n, i in enumerate(top5): # Print keys and values\n        print(\"n:\", n)\n        print(\"i:\", i)\n        values.append(probability[0][i])\n        bar_label.append(labels[i])\n        print(i, \":\", labels[i], \"----\", utils.percent(probability[0][i])) # Print the predicted probability of each species\n\n    # One row, one column on the canvas\n    ax = fig.add_subplot(111)\n    # Draw the bar chart\n    ax.bar(range(len(values)), values, tick_label=bar_label, width=0.5, fc='g')\n    # Y-axis label\n    ax.set_ylabel(u'probability')\n    # Title\n    ax.set_title(u'Top-5')\n    # Add the predicted probability value on top of each bar\n    for a, b in zip(range(len(values)), values):\n        ax.text(a, b + 0.0005, utils.percent(b), ha='center', va='bottom', fontsize=7)\n    plt.show() # Show the figure in a pop-up window\n","repo_name":"yunxijun/TensorflowNotes","sub_path":"vgg_test.py","file_name":"vgg_test.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"25232684592","text":"import Adafruit_BBIO.GPIO as GPIO\nimport sys,os,time\nimport threading\nrunning = True\n\ndef main():\n\tglobal running # otherwise we can't stop the thread\n\toutputs = [\"P9_12\",\"P9_14\",\"P9_16\",\"P9_18\"]\n\tinputs = [\"P9_11\",\"P9_13\",\"P9_15\",\"P9_17\"]\n\n\t#set up pins\n\tfor out in outputs:\n\t\tGPIO.setup(out,GPIO.OUT)\n\t\tGPIO.output(out,GPIO.HIGH)\n\tfor ins in inputs:\n\t\tGPIO.setup(ins,GPIO.IN)\n\t\tGPIO.add_event_detect(ins,GPIO.FALLING)#sets a flag on the pin\n\n\t#start thread to monitor inputs\n\tthread = threading.Thread(target= lambda:switchOnFall(inputs,outputs))\n\tthread.start()\n\n\tprint(\"Enter anything to quit\")\n\tinput() #stall program till user presses enter\n\n\t#clean up\n\trunning = False\n\tfor out in outputs:\n\t\tGPIO.output(out,GPIO.LOW)\n\tGPIO.cleanup()\n\ndef switchOnFall(inPins,outPins):\n\t#thread function\n\twhile running:\n\t\tevent = False\n\t\t#GPIO.wait_for_edge(inPin,GPIO.FALLING) #blocking call\n\t\tfor Kappa in range(0,len(inPins)):\n\t\t\tinPin = inPins[Kappa]\n\t\t\toutPin = outPins[Kappa]\n\t\t\tif(GPIO.event_detected(inPin)): #polls the flag\n\t\t\t\tGPIO.output(outPin, 1 ^ GPIO.input(outPin))\n\t\t\t\tevent = True\n\t\ttime.sleep(0.1) # debouncing time frame\n\t\tif(event):\n\t\t\tfor inPin in inPins:\n\t\t\t\tGPIO.event_detected(inPin) # clears all pin flags\n\nif(__name__==\"__main__\"):\n\tmain()\n","repo_name":"koontz/AlvinKoontz_ECE-497","sub_path":"hw02/gpioLEDS.py","file_name":"gpioLEDS.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74584525811","text":"\"\"\"Produces various plots for iris.data\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport argparse\nimport sys  # needed for the sys.exit() calls in the except blocks below\n\n\ndef main():\n\n    parser = argparse.ArgumentParser(\n        description='plot iris data',\n        prog='plotter')\n\n    parser.add_argument('--file_name',\n                        type=str,\n                        help='Name of the file with iris data',\n                        required=True)\n\n    args = parser.parse_args()\n\n    # load in data set and assign column names\n    try:\n        iris = pd.read_csv(args.file_name, header=None)\n        iris.columns = ['sepal_width', 'sepal_length', 'petal_width',\n                        'petal_length', 'iris_species']\n    except Exception as e:\n        print('Error of type ' + str(type(e)) + ' occurred')\n        sys.exit(1)\n\n    # produce boxplot of all 
measurements across all iris species\n try:\n measurement_names = ['sepal_width', 'sepal_length', 'petal_width',\n 'petal_length']\n plt.boxplot(iris[measurement_names], labels=measurement_names)\n plt.ylabel('cm')\n plt.savefig('iris_boxplot.png')\n # plt.show()\n except Exception as e:\n print('Error of type ' + str(type(e)) + ' occurred')\n sys.exit(1)\n\n # produce scatterplot of petal length vs. petal width for all iris species\n try:\n for species_name in set(iris['iris_species']):\n iris_subset = iris[iris['iris_species'] == species_name]\n plt.scatter(iris_subset['petal_width'],\n iris_subset['petal_length'],\n label=species_name,\n s=5)\n plt.legend()\n plt.xlabel('petal_width (cm)')\n plt.ylabel('petal_length (cm)')\n plt.savefig('petal_length_v_width_scatter.png')\n # plt.show()\n except Exception as e:\n print('Error of type ' + str(type(e)) + ' occurred')\n sys.exit(1)\n\n # produce both boxplot and scatterplot side by side\n try:\n fig, axes = plt.subplots(2, 2)\n fig.delaxes(axes[1, 0])\n fig.delaxes(axes[1, 1])\n\n # left boxplot\n axes[0, 0].boxplot(iris[measurement_names], labels=measurement_names)\n axes[0, 0].set_xticklabels(measurement_names, fontsize=6)\n axes[0, 0].set_ylabel('cm', fontsize=8)\n axes[0, 0].tick_params(axis='y', labelsize=8)\n\n # right scatterplot\n for species_name in set(iris['iris_species']):\n iris_subset = iris[iris['iris_species'] == species_name]\n axes[0, 1].scatter(iris_subset['petal_width'],\n iris_subset['petal_length'],\n label=species_name,\n s=5,\n alpha=0.5)\n axes[0, 1].legend(loc='upper left', prop={'size': 6})\n axes[0, 1].set_xlabel('petal_width (cm)', fontsize=8)\n axes[0, 1].set_ylabel('petal_length (cm)', fontsize=8)\n axes[0, 1].tick_params(axis='x', labelsize=8)\n axes[0, 1].tick_params(axis='y', labelsize=8)\n\n # remove top and right borders of plots\n for i in range(2):\n for j in range(2):\n axes[i, j].spines['top'].set_visible(False)\n axes[i, j].spines['right'].set_visible(False)\n axes[i, j].spines['bottom'].set_visible(True)\n axes[i, j].spines['left'].set_visible(True)\n\n plt.savefig('multi_panel_figure.png')\n # plt.show()\n except Exception as e:\n print('Error of type ' + str(type(e)) + ' occurred')\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cu-swe4s-fall-2022/assignment-7-using-libraries-NessaGrace","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42551326922","text":"import pandas as pd\nimport math\nfrom collections import OrderedDict\nimport time\n\n\n### Description de la fonction run_algo :\n'''Utilisez cette fonction pour démarrer l'algorithme.\n filepath : addresse relative du fichier, par exemple Algos/instances/captANOR900_15_20.dat\n Rcapt : Rayon de captation\n Rcom : Rayon de communication\n k : degré de couvertue à atteindre\n taille_tabou : taille de la liste des tabous, typiquement 3 ou 7.\n temps_limite : critère d'arrêt. Peut être un temps (en MINUTES) ou un nombre d'itération de la métaheuristique, dépend de la valeur de condition\n condition : détermine le critère d'arrêt. 
0 si vous voulez un nombre maximum d'itération, 1 si vous voulez un temps maximal\n version : détermine la méthode pour trouver, voisinage (1 ou 2)\n '''\n\ndef run_algo(filepath, Rcapt, Rcom, k, taille_tabou, temps_limite, condition=1, version=2):\n temps_limite *= 60\n data = read_data(filepath)\n\n adj_capt, adj_com = adjacence(data, Rcapt, Rcom)\n print(\"Résolution du problème au fichier : \", filepath, \"\\n taille tabou : \", taille_tabou, \"\\n k : \", k, \"\\n Rcapt : \", Rcapt,\n \"\\n Rcom : \", Rcom)\n\n C, lc, ld = parcours(adj_capt, adj_com, k)\n print(\"Réalisabilité de la solution initiale : \" + str(\n vérifier_réalisabilité(adj_capt, adj_com, C, k)))\n preds, succs = construire_chemin(adj_com, C)\n nouv_C = post_traitement(adj_capt, adj_com, k, preds, succs, C, 2)\n print(\"Réalisabilité de la solution après traitement : \" + str(\n vérifier_réalisabilité(adj_capt, adj_com, nouv_C, k)))\n t1 = time.time()\n meilleur_chemin, minimum, i, lc, minimums = parcours_voisinages(nouv_C, adj_capt, adj_com, k, taille_tabou, temps_limite, condition, version)\n t2 = time.time()\n print( \"Algorithme terminé, resultat final : \" + str(minimum)\n + \"\\n Résultat initial : \" + str(len(nouv_C)) + \"\\n temps écoulé lors de la métaheuristique : \" + str(\n t2 - t1) + \"\\n Taille tabou : \" + str(taille_tabou) + \"\\n\")\n\n\n\n###Description de la fonction parcours :\n'''C'est une fonction qui prend en entrée le dictionnaire représentant la liste d'adjacence de captation, le dictionnaire représentant la liste d'adjacence de communication et l'entier k qui précise le degré de couverture.\nCette fonction effectue un parcours de graphe tout au long duquel elle ajoute des capteurs en assurant la k-couverture de chaque cible parcourue et l'existence du chemin entre les capteurs et le puits. 
(Plus d'explication dans le rapport).\nElle retourne C, len(C), len(D) : (liste des cibles où on a des capteurs, nombre de capteurs, nombre de cibles k-couvertes)'''\ndef parcours(adjacence_capt, adjacence_com, k):\n W = [] #Liste qui va contenir les cibles qui ne sont pas encore explorées lors du parcours par profondeur\n D = set() #ensemble des cibles k-couvertes\n n = len(adjacence_com) - 1 #nombre total des cibles sans compter le puits\n p = (\"0.00\", \"0.00\") #Selon le format des données le puits est toujours la première clé\n qualites_voisins_p = {} #dictionnaire indiquant pour chaque voisin du puits dans le graphe de communication sa qualité\n for elem in adjacence_com[p]:\n qualites_voisins_p[elem] = qualite(elem, adjacence_capt, D)\n max_qualite = 0 #variable qui va contenir la qualité maximum d'un voisin du puits dans Gcom\n max_voisin = (\"\",\"\") #variable qui va contenir le voisin de p dans Gcom de qualité maximum\n for elem in qualites_voisins_p:\n if(qualites_voisins_p[elem] > max_qualite):\n max_voisin = elem\n max_qualite = qualites_voisins_p[elem]\n c = max_voisin #c, le voisin de puits de qualité maximmum dans Gcom va être la première cible où on place le premier capteur\n C = [c]\n capteurs_cibles = {} #dictionnaire qui pour chaque capteur indique une liste des cibles couvertes par ce capteur\n cibles_nombre_capteurs = {} #dictionnaire qui pour chaque cible indique le nombre de capteurs qui la couvrent\n cibles_vois_c_com = {} #nombre de voisins pour une cible qui sont à la fois dans Gcom et dans C\n cible_courante = c\n cibles_visitées = {} #dictionnaire qui donne 1 comme valeur à une cible qui est a été visitée et 0 à une qui n'est pas encore visitée\n liste_cibles = list(adjacence_capt.keys()) #liste contenant les cibles\n if((\"0.00\", \"0.00\") in liste_cibles):\n liste_cibles.remove((\"0.00\", \"0.00\"))\n for cible in liste_cibles: #initialisation de cibles_visitées et cibles_vois_c_com où on met toutes les valeurs à 0\n cibles_visitées[cible] = 0\n cibles_vois_c_com[cible] = 0\n cibles_visitées[c] = 1 #la cible c a été visitée\n for cible in adjacence_capt[c]: #Pour les voisins de c dans Gcapt\n ajouter_elem_a_dict_type1(capteurs_cibles, c, cible) #on ajoute c dans la liste des capteurs qui couvrent chaque voisin dans le dictionnaire capteurs_cibles\n ajouter_elem_a_dict_type2(cibles_nombre_capteurs, cible) #et on incrémente le nombre de capteurs qui couvrent chaque voisin dans cibles_nombre_capteurs\n if(cibles_nombre_capteurs[cible] == k): #Si pour un voisin de C on trouve qu'il est couvert par k capteurs on l'ajoute à l'ensemble D\n D.add(cible)\n for cible in adjacence_com[c]: #Pour chaque voisin de c dans Gcom on incrémente le nombre de capteurs avec lesquels il peut communiquer dans cibles_vois_c_com\n ajouter_elem_a_dict_type2(cibles_vois_c_com, cible)\n for cible in adjacence_com[(\"0.00\", \"0.00\")]: #Pour chaque voisin du puits dans Gcom on incrémente le nombre de capteurs avec lesquels il peut communiquer dans cibles_vois_c_com car le but c'est de communiquer avec le puits\n ajouter_elem_a_dict_type2(cibles_vois_c_com, cible)\n ajouter_elem_a_dict_type1(capteurs_cibles, c, c) #On n'oublie pas d'ajouter le capteur c lui-même à la liste des cibles couvertes par c\n ajouter_elem_a_dict_type2(cibles_nombre_capteurs, c) #On n'oublie pas d'incrémenter le nombre de capteurs couvrant c aussi\n if(cibles_nombre_capteurs[c] == k): #On teste si c est couvert par k capteurs après ces modifications et s'il l'est on l'ajoute à D\n D.add(c)\n 
ajouter_elem_a_dict_type2(cibles_vois_c_com, c) #On incrémente aussi le nombre de capteurs avec qui c peut communiquer dans C (il fait partie de C)\n for elem in adjacence_com[p]: #On ajoute les voisins de puits non explorés dans W\n if(elem != c):\n W.append(elem)\n \n \n while(len(D) != n): #Tant qu'il y a des cibles non k-couvertes\n \n \n for voisin in adjacence_capt[cible_courante]: #Pour tout voisin de captation de la cible courante, si ce voisin n'a pas été visité ni ajouté à W on l'ajoute\n if(voisin not in W and cibles_visitées[voisin] == 0):\n W.append(voisin)\n '''On peut supprimer cette partie et ne rien faire si cette condition est vérifiée et le parcours va se faire grâce à W'''\n if(cibles_visitées[cible_courante] == 1): #Si la cible courante a été visitée (c.à.d elle a été k-couverte donc on va passer à une autre)\n v = adjacence_capt[cible_courante][0] #premier candidat à remplacer la cible courante (son premier voisin dans la liste d'adjacence de captation)\n m = len(adjacence_capt[cible_courante])\n i = 1\n while(i < m and cibles_visitées[v] == 1): #Si le candidat a été visité on parcours la liste de voisinage de captation jusqu'à ce qu'on trouve un voisin non visité pour remplacer la cible courante\n v = adjacence_capt[cible_courante][i]\n i += 1\n if(cibles_visitées[v] == 1 and len(W) > 0): #Dans le cas où on ne trouve aucun candidat non visité et si W n'est pas vide on parcours les éléments de W jusqu'à trouver une cible non visitée\n v = W.pop()\n while(cibles_visitées[v] == 1 and len(W) > 0): #Tant que le candidat courant et W contient toujours d'éléments on continue la recherche d'un remplaçant dans W\n v = W.pop()\n if(cibles_visitées[v] == 0): #Si la recherche termine et on trouve un candidat non visité on change la cible courante\n cible_courante = v\n if(cibles_visitées[v] == 1 and len(W) == 0): #Si la recherche termine car W est devenue vide sans trouver un candidat non visité alors le parcours termine\n return C, len(C), len(D)\n else:\n cible_courante = v\n \n \n if(cible_courante not in D): #Si la cible courante n'est pas dans D (alors n'est pas visitée)\n \n \n s = 0 #variable qui va contenir le nombre de voisins de captation de cible courante qui sont des capteurs (donc le degré de captation de cible courante)\n for voisin in adjacence_capt[cible_courante]:\n if(voisin in C):\n s += 1\n \n \n if(cible_courante in C): #On prend en considération la cible courante dans le degré de captation si elle a un capteur\n s += 1\n \n if(s < k): #Si s < k alors la cible courante n'est pas k-couverte\n \n voisins_capt = adjacence_capt[cible_courante].copy()\n voisins_capt.append(cible_courante) #On ajoute la cible courante à la liste des voisins de captation de la cible courante pour vérifier qu'elle est aussi captée\n \n \n t = 0\n nouv_capt = []\n while(t < k-s):\n voisins_qualités = {} #dictionnaire qui va associer à chaque voisin de captation de la cible courante et à la cible courante elle-même la qualité\n for elem in voisins_capt:\n\n if(elem not in C): #On cherche la cible de meilleure qualité pour mettre le capteur alors on doit vérifier qu'il n'y a pas un capteur déjà installé sur cette cible\n \n if(cibles_vois_c_com[elem]>0): #pour assurer la communication (ceci signifie que pour l'élément considéré il a au moins un voisin de communication dans C ou il peut communiquer directement avec le puits)\n \n voisins_qualités[elem] = qualite(elem, adjacence_capt, D)\n \n \n ordered_voisins_qualités = OrderedDict(sorted(voisins_qualités.items(),\n key=lambda kv: 
kv[1], reverse=True)) #tri par ordre décroissant de qualité\n \n nc = list(ordered_voisins_qualités.keys())[0] #on choisit la cible ayant la meilleure qualité\n\n \n nouv_capt.append(nc)\n C.append(nc) #on ajoute ces capteurs à C\n ajouter_elem_a_dict_type2(cibles_vois_c_com, nc) #car le capteur est dans C\n for v_c2 in adjacence_com[nc]: #pour tout voisin de communication du capteur on incrémente le nombre de capteurs de C avec lesquels ce voisin peut communiquer\n ajouter_elem_a_dict_type2(cibles_vois_c_com, v_c2)\n for v_c in adjacence_capt[nc]: #pour chacun de ces voisins de captation\n ajouter_elem_a_dict_type1(capteurs_cibles, nc, v_c) #on ajoute ce capteur à la liste des capteurs de ce voisin\n \n ajouter_elem_a_dict_type2(cibles_nombre_capteurs, v_c) #et on incrémente le nombre de capteurs captant ce voisin dans cibles_nombre_capteurs\n \n if(cibles_nombre_capteurs[v_c] == k): #puis on vérifie si ce voisin est k-couvert et s'il l'est on l'ajoute dans D\n D.add(v_c)\n \n ajouter_elem_a_dict_type1(capteurs_cibles, nc, nc) #on fait le même pour le capteur lui même\n ajouter_elem_a_dict_type2(cibles_nombre_capteurs, nc)\n if(cibles_nombre_capteurs[nc] == k):\n D.add(nc)\n t += 1\n \n \n \n D.add(cible_courante) #cible courante est k-couverte donc on l'ajoute dans D\n \n cibles_visitées[cible_courante] = 1 #On indique qu'on a visité la cible courante\n \n if(len(W) == 0): #si W est vide alors le parcours termine\n return C, len(C), len(D)\n else: #sinon on met à jour la cible courante pour qu'elle soit le dernier élément de W et on revient à la boucle while principale\n cible_courante = W.pop()\n return C, len(C), len(D)\n\n\n\n\n#Description de la fonction degre_couv:\n'''Cette fonction prend en entrée une cible, le dictionnaire représentant la liste d'adjacence de captation du graphe, et une liste de capteurs et retourne pour une cible donnée le degré de couverture de cette cible'''\ndef degre_couv(cible, adj_capt, C):\n s = 0\n for elem in adj_capt[cible]:\n if(elem in C):\n s += 1\n if(cible in C):\n s += 1\n return s\n\n#Description de la fonction construire_chemin:\n'''Cette fonction prend en entrée le dictionnaire représentant la liste d'adjacence de communication et la liste des capteurs pour retourner deux dictionnaires :\n - Le premier affecte à chaque capteur le capteur prédécesseur\n - Le deuxième affecte à chaque capteur la liste de successeurs'''\ndef construire_chemin(adj_com, C):\n capt_a_0 = C[0] #On veut que la première cible du chemin soit le puits donc on cherche le deuxième capteur de façon à ce qu'il soit un voisin de communication du puits\n i = 0\n while(i < len(C)):\n if((\"0.00\", \"0.00\") in adj_com[C[i]]):\n capt_a_0 = C[i]\n break\n i += 1\n \n succ ={} #dictionnaire qui va contenir les successeurs\n for elem in C: #initialisation de succ\n succ[elem] = []\n \n succ[(\"0.00\", \"0.00\")] = [capt_a_0] #On a déjà trouvé l'un des successeurs du puits\n pred = {capt_a_0 : (\"0.00\", \"0.00\")} #et alors le prédécesseur de ce successeur est le puits\n ajouté = {} #dictionnaire indiquant pour chaque capteur de C si ce capteur a été ajouté dans le chemin (autrement : il a un prédécesseur ajouté)\n for elem in C: #initialisation de ajouté\n ajouté[elem] = False\n ajouté[capt_a_0] = True #on a ajouté l'un des successeurs du puits\n nombre_restant = len(C) - 1 #le nombre restant des capteurs à ajouter dans le chemin\n i = 0\n while(nombre_restant > 0): #tant qu'il y a des capteurs non ajoutés dans le chemin\n #print(nombre_restant)\n if(ajouté[C[i]] == False): #si le 
capteur i n'est pas ajouté\n if(C[i] in adj_com[(\"0.00\", \"0.00\")]): #s'il peut communiquer directement avec le puits\n ajouté[C[i]] = True #on l'ajoute\n pred[C[i]] = (\"0.00\", \"0.00\") #on indique que son prédécesseur est le puits\n succ[(\"0.00\", \"0.00\")].append(C[i]) #on l'ajoute à la liste de successeurs du puits\n nombre_restant -= 1 #et on décrémente le nombre de capteurs restants\n \n else: #sinon\n for voisin in adj_com[C[i]] : #pour chaque voisin de communication de ce ce capteur\n \n if(voisin in C): #si le voisin est dans C\n \n if(ajouté[voisin] == True): #et si le voisin a été ajouté dans le chemin\n ajouté[C[i]] = True #on ajoute le capteur dans le chemin\n pred[C[i]] = voisin #on précise que le prédécesseur du capteur est ce voisin\n succ[voisin].append(C[i]) #et on ajoute le capteur à la liste des successeurs du voisin\n nombre_restant -= 1 #finalement on décrémente le nombre restant\n break #on sort de la boucle quand on ajoute le capteur\n i = (i+1)%len(C) #tant qu'il y a des capteurs non ajoutés on va parcourir C pour pouvoir les ajouter\n return pred, succ\n\n\n\n\n\n#Description de la fonction est_connexe:\n'''Cette fonction prend en entrée : une liste de capteurs et le dictionnaire représentant la matrice d'adjacence de communication du graphe et retourne True si les capteurs avec le puits sont connexes et False sinon'''\ndef est_connexe(C, adj_com):\n liste_capteurs_parcourus = [(\"0.00\", \"0.00\")]\n capteurs_visités = {}\n for elem in C:\n capteurs_visités[elem] = False\n pile = []\n for elem in adj_com[(\"0.00\", \"0.00\")]:\n if(elem in C):\n pile.append(elem)\n while(len(pile) > 0):\n capteur_courant = pile.pop()\n liste_capteurs_parcourus.append(capteur_courant)\n capteurs_visités[capteur_courant] = True\n for voisin in adj_com[capteur_courant]:\n if(voisin in C):\n if(capteurs_visités[voisin] == False and voisin not in pile):\n pile.append(voisin)\n if(len(liste_capteurs_parcourus) <= len(C)):\n return False\n else:\n return True\n \n \n \n \n \n###Description de la fonction post_traitement :\n'''C'est une fonction qui prend en entrée un dictionnaire représentant la liste d'adjacence de captation, un autre représentant la liste d'adjacence de communication, le degré de couverture k, le dictionnaire de prédécesseurs\net le dictionnaire de successeurs définissant le chemin de communication, une liste de capteurs et la version (1 pour la 1ere et 2 pour la 2eme utilisant la condition de connexité (améliorée)).\nCette fonction parcourt les cibles et élimine les capteurs inutiles c.à.d les capteurs dont la supression n'affecte pas la réalisabilité de la solution.\nElle retourne la nouvelle liste de capteurs.'''\ndef post_traitement(adj_capt, adj_com, k, preds, succs, C, version):\n nouveau_C = C.copy()\n \n for capteur in C: #Pour chaque capteur dans C\n inutile = True\n if(degre_couv(capteur, adj_capt, nouveau_C) > k) : #si le capteur est couvert par un nombre de capteurs > k\n for cible in adj_capt[capteur]: #pour chaque cible captée par ce capteur\n if(degre_couv(cible, adj_capt, nouveau_C) == k): #si on a une cible qui est captée exactement par k capteurs alors ce capteur est utile\n inutile = False\n break\n else: #si le capteur est capté exactement par k capteurs alors il est utile\n inutile = False\n \n if(inutile == True) : #si le capteur et ces voisins de captations sont tous captés par > k capteurs\n if(version == 1):\n #1ere version\n pred = preds[capteur] #pred est le prédécesseur du capteur dans le chemin\n succ = succs[capteur] #succ est la 
liste des successeurs du capteur dans le chemin\n for elem in succ: #pour chaque successeur du capteur\n if(elem not in adj_com[pred]): #si le successeur ne peut pas communiquer avec pred alors le capteur est utile pour la communication (on peut faire mieux et vérifier la communication du successeur avec tous les preds de pred)\n inutile = False\n break\n if(version == 2):\n #2eme version\n C2 = nouveau_C.copy()\n C2.remove(capteur)\n if(est_connexe(C2, adj_com) == False):\n inutile = False\n ###\n \n \n \n if(inutile == True): #si le capteur est inutile\n nouveau_C.remove(capteur)\n \n return nouveau_C\n\n\n\n\n\n###Description de la fonction supprimer_ajouter :\n'''C'est une fonction qui prend en entrée un capteur (paire de coordonnées), une liste de capteurs, un dictionnaire représentant une liste d'adjacence de captation, un autre représentant une liste d'adjacence de communication, un dictionnaire de prédécesseurs et de successeurs décrivant un chemin de communication, le degré de couverture k et la version utilisée (1 pour la 1ere et 2 pour la version améliorée utilisant la connexité).\nCette fonction permet de construire un élément du voisinage en supprimant un capteur et ajoutant un autre de façon à ce que la réalisabilité est conservée.\nElle retourne un tuple où le premier élément indique la réalisabilité de la nouvelle solution, et le deuxième la nouvelle liste de capteurs.'''\ndef supprimer_ajouter(capteur, C, adj_capt, adj_com, pred, succ, k, version):\n nouv_C = C.copy()\n à_couvrir = [] #va contenir les capteurs qu'on doit couvrir une fois on supprime le capteur en entrée\n for voisin_capt in adj_capt[capteur]: #pour chaque voisin de captation de capteur\n if(degre_couv(voisin_capt, adj_capt, nouv_C) == k): #si le voisin est capté par par exactement k capteurs alors quand on va supprimer capteur on doit le couvrir par le nouveau capteur ajouté\n à_couvrir.append(voisin_capt) #on l'ajoute alors à la liste à couvrir\n if(degre_couv(capteur, adj_capt, nouv_C) == k): #si le capteur lui-même est capté exactement par k capteur\n à_couvrir.append(capteur) #on l'ajoute à la liste à couvrir\n \n \n '''l = list(adj_capt.keys())\n if((\"0.00\", \"0.00\") in l):\n l.remove((\"0.00\", \"0.00\"))\n for voisin_capt in l:''' #un autre choix autre que celui ci dessous peut être parmi toutes les cibles mais l'autre donne de meilleures solutions\n for voisin_capt in adj_capt[capteur]: # on choisit le nouveau capteur parmi les voisins du capteur à supprimer\n valide = True #indique la validité du voisin pour être le nouveau capteur\n if(voisin_capt not in C): #si le voisin n'est pas dans C\n \n #2eme version\n if(version == 2):\n C2 = C.copy()\n C2.remove(capteur)\n C2.append(voisin_capt)\n if(est_connexe(C2, adj_com)):\n valide = True\n else:\n valide = False\n ###\n #1ere version\n if(version == 1):\n if(pred[capteur] in adj_com[voisin_capt] or voisin_capt in adj_com[(\"0.00\", \"0.00\")]): #si le voisin peut communiquer avec le prédécesseur du capteur à supprimer ou directement avec le puits (on peut faire mieux)\n for elem in succ[capteur]: #pour chaque successeur du capteur\n \n if(elem not in adj_com[voisin_capt]): #si le successeur ne peut pas communiquer avec le nouveau capteur (on peut faire mieux)\n valide = False #alors le voisin n'est pas valide\n \n \n \n \n else: #si le voisin ne peut pas communique avec le prédécesseur de capteur ou avec le puits\n \n valide = False #alors il n'est pas valide\n else: #si le voisin est dans C déjà alors il n'est pas valide\n valide = False\n \n 
if(valide == True): #si le voisin conserve la communication\n for elem in à_couvrir: #pour tout élément de à couvrir\n \n if(elem not in adj_capt[voisin_capt] and elem != voisin_capt): #si pour un élément différent du voisin cet élément n'est pas couvert par le voisin\n valide = False #alors le voisin n'est pas valide\n break\n if(valide == True): #si le voisin est valide alors\n nouv_C.remove(capteur) #on supprime le capteur\n nouv_C.append(voisin_capt) #on ajoute le voisin\n \n break\n return (valide, nouv_C)\n\n\n\n\n\n###Descritption de la fonction voisinage :\n'''Cette fonction prend en entrée une liste de capteurs, un dictionnaire représentant la liste d'adjacence de captation, un autre représentant la liste d'adjacence de communication, un dictionnaire de pédécesseurs et de successeurs définissant un chemin de communication, le degré de couverture k et la version utilisée pour le post-traitement et la création d'un voisinage (1 pour la 1ere et 2 pour l'améliorée)\nCette fonction génère à partir d'une solution réalisable le voisinage formé par les éléments transformés et valides à partir de C.\nElle retourne une liste de listes de capteurs définissant le voisinage, un dictionnaire associant pour chaque élément de voisinage (par son indice) sa valeur, une liste des capteurs supprimés pour chaque élément du voisinage.'''\ndef voisinage(C, adj_capt, adj_com, pred, succ, k, version):\n voisinage = [] #va contenir des listes formant le voisinage de C\n valeurs_voisinage = {} #va contenir la valeur de chaque élément de voisinage après traitement\n #minimum = len(C) #variable qui va contenir la valeur minimale dans le voisinage\n #final_C = []\n capteurs_supprimés = []\n for capteur in C: #pour chaque capteur dans C\n existe, nouv_C = supprimer_ajouter(capteur, C, adj_capt, adj_com, pred, succ, k, version) #on effectue la transformation supprimer_ajouter\n if(existe == True): #si la transformation donne une liste rélisable\n nouv_pred, nouv_succ = construire_chemin(adj_com, nouv_C) #on construit le chemin de la transformation\n final_C = post_traitement(adj_capt, adj_com, k, nouv_pred, nouv_succ, nouv_C, version) #puis on élimine les capteurs inutiles\n \n \n voisinage.append(final_C) #on ajoute le résultat final dans le voisinage\n index = len(voisinage) - 1 #c'est l'indice de la liste ajoutée dans voisinage\n valeurs_voisinage[index] = len(final_C) #on ajoute la valeur de cette liste dans valeurs_voisinage\n capteurs_supprimés.append(capteur)\n return voisinage, valeurs_voisinage, capteurs_supprimés\n\n\n\n\n\n###Description de la fonction parcours_voisinage :\n'''C'est une fonction qui prend en entrée une liste de capteurs, un dictionnaire représentant la liste d'adjacence de captation, un autre représentant la liste d'adjacence de communication, le degré de couverture k, la taille de la liste tabou, le nombre des itérations maximal (ou le temps d'exécution maximal (selon la condition)), la condition qui est un entier (si sa valeur est 1 on active la condition d'arrêt temporel sinon on laisse la condition d'arrêt selon les itérations), la version qui est un entier = 1 ou 2 (1 pour effectuer le post traitement et la création du voisinage en utilisant la première et 2 pour le faire en utilisant la première)\nCette fonction parcourt les voisinages tout en utilisant une liste tabou pour interdire le parcours de certains voisinages.\nElle retourne le meilleur chemin trouvé lors du parcours, sa valeur, le nombre de voisinages explorés jusqu'à la meilleure solution, la valeur initiale, et une 
liste des minimums rencontrés à chaque itération.'''\ndef parcours_voisinages(C, adj_capt, adj_com, k, taille_tabou, nb_itérations, condition, version):\n liste_tabou = []\n i = 0\n meilleur_chemin = C.copy()\n chemin_courant = C.copy()\n minimum = len(C) #va contenir le minimum par rapport à tous les voisinages\n meilleur_index = 0\n minimums = []\n meilleur_it = 0\n #pour la condition d'arrêt temporelle\n if(condition == 1):\n t1 = time.time()\n t2 = 3\n i = 0\n r = 0\n while(i < nb_itérations): #on explore au maximum nb_itérations voisinages\n minimums.append(minimum)\n \n if(vérifier_réalisabilité(adj_capt, adj_com, chemin_courant, k) == False):\n print(chemin_courant)\n print(\"**************************\")\n print(\"Erreur : solution non réalisable\")\n \n pred, succ = construire_chemin(adj_com, chemin_courant) #on trouve le chemin de la solution courante\n vois, vals, capteurs_supprimés = voisinage(chemin_courant, adj_capt, adj_com, pred, succ, k, version) #on trouve le voisinage de la solution courante\n j = 0\n vois2 = vois.copy()\n capteurs_supprimés2 = capteurs_supprimés.copy()\n while(j < len(vois)): #pour chaque liste du voisinage\n for capt in liste_tabou: #pour chaque capteur dans la liste tabou\n #print(len(capteurs_supprimés))\n #print(len(vois))\n if(capt in vois[j]): #si le capteur est dans la listes\n vois2.remove(vois[j]) #on supprime cet élément de la liste du voisinage\n capteurs_supprimés2.remove(capteurs_supprimés[j])\n break\n j += 1\n vois = vois2.copy()\n capteurs_supprimés = capteurs_supprimés2.copy()\n e = 0\n vals = {} #on a modifié le voisinage don on doit mettre à jour le dictionnaire des valeurs\n while(e < len(vois)):\n vals[e] = len(vois[e])\n e += 1\n if(len(vois) == 0): #si le voisinage est vide on arrête les itérations\n break\n min_local = len(vois[0]) #va contenir le minimum local dans un voisinage\n min_index = 0\n for index in vals: #si la valeur d'un élément est < min_local alors on met min_local à jour\n if(vals[index] < min_local):\n min_local = vals[index]\n min_index = index\n if(min_local < minimum): #si le min_local < minimum on met minimum à jour\n minimum = min_local\n meilleur_chemin = vois[min_index].copy()\n meilleur_it = r\n if(len(liste_tabou) < taille_tabou): #traitement FIFO pour la liste tabou\n liste_tabou.append(capteurs_supprimés[min_index])\n else:\n liste_tabou.remove(liste_tabou[0])\n liste_tabou.append(capteurs_supprimés[min_index])\n chemin_courant = vois[min_index].copy()\n \n \n if(condition == 1):\n t2 = time.time()\n if(t2 - t1 > nb_itérations):\n break\n else:\n i += 1\n r+=1\n return meilleur_chemin, minimum, meilleur_it, len(C), minimums\n\n\n\n\n\n###Description de la fonction vérifier_réalisabilité :\n'''C'est une fonction qui prend en entrée un dictionnaire représentant la liste d'adjacence de captation, un autre représentant la liste d'adjacence de communication, une liste de capteurs et le degré de couverture k.\nElle retourne True si C est réalisable et False sinon.'''\ndef vérifier_réalisabilité(adj_capt, adj_com, C, k):\n liste_cibles = list(adj_capt.keys())\n if((\"0.00\", \"0.00\") in liste_cibles):\n liste_cibles.remove((\"0.00\", \"0.00\"))\n for elem in liste_cibles:\n s = 0\n for elem2 in adj_capt[elem]:\n if(elem2 in C):\n s = s + 1\n if(elem in C):\n s = s + 1\n if(s < k):\n return False\n a = []\n for elem in C:\n for elem2 in adj_com[elem]:\n if(elem2 in C):\n a.append(elem)\n break\n if(elem in adj_com[(\"0.00\", \"0.00\")]):\n a.append(elem)\n if(set(a) != set(C)):\n return False\n return 
True\n\n\n### Description de la fonction read_data :\n'''Fonction qui prend en entrée le path du fichier des données et retourne une liste des cibles (une cible est une paire de coordonnées)'''\ndef read_data(filepath):\n data = pd.read_csv(filepath, header = None, names = [\"coord\"]) #il faut mettre les données dans le même folder que ce code\n cibles = []\n i = 0\n while(i < len(data)):\n cible = (data.loc[:]['coord'][i].split()[1], data.loc[:]['coord'][i].split()[2])\n if(i == len(data) - 1): #On considère ce cas pour éliminer le ; de la dernière ligne du fichier\n cible = (cible[0], cible[1][:len(cible[1]) - 1])\n cibles.append(cible)\n i += 1\n return cibles\n\n\n###Description de la fonction adjacence :\n'''Cette fonction prend en entrée la liste des cibles, le rayon de captation et le rayon de communication et retourne deux listes d'adjacence sous forme de 2 dictionnaires :\n - Le premier associe à chaque cible (la clé) c1 une liste des cibles (la valeur) qui sont à une distance <= Rcapt de c1\n - Le deuxième associe à chaque cible (la clé) c1 une liste des cibles (la valeur) qui sont à une distance <= Rcom de c1\n'''\n#Note : Comme (0.00, 0.00) (le puits) est une cible particulière qui n'a pas besoin d'être k-couverte, on choisit de ne pas l'inclure dans les listes des valeurs même si elle satisfait les conditions nécéssaires pour y être\n#Toutefois elle est nécessairement présente comme clé dans la liste d'adjacence du graphe de communication car elle communique toujours avec au moins une autre cible\ndef adjacence(cibles, Rcapt, Rcom):\n adjacence_capt = {}\n adjacence_com = {}\n for cible in cibles:\n if(cible != (\"0.00\", \"0.00\")):\n adjacence_capt[cible] = []\n adjacence_com[cible] = []\n for cible1 in cibles:\n for cible2 in cibles:\n if(distance(cible1, cible2) <= Rcapt and cible1 != cible2 and cible2 != (\"0.00\",\"0.00\")): #On n'inclut pas la cible elle-même dans sa liste des voisins\n ajouter_elem_a_dict_type1(adjacence_capt, cible1, cible2)\n ajouter_elem_a_dict_type1(adjacence_com, cible1, cible2)\n elif(distance(cible1, cible2) <= Rcom and cible1 != cible2 and cible2 != (\"0.00\",\"0.00\")): #On n'inclut pas la cible elle-même dans sa liste des voisins\n ajouter_elem_a_dict_type1(adjacence_com, cible1, cible2)\n return (adjacence_capt, adjacence_com)\n\n\n\n\n\n### Description de la fonction distance :\n'''Cette fonction prend en entrée deux cibles (une paire de coordonnées) et retourne la distance euclidienne entre ces deux cibles'''\ndef distance(cible1, cible2):\n return(math.sqrt((float(cible1[0]) - float(cible2[0]))**2+(float(cible1[1]) - float(cible2[1]))**2))\n\n\n\n### Description de la fonction ajouter_elem_a_dict_type1 :\n'''C'est une fonction qui prend en entrée un dictionnaire d, un élément qu'on veut ajouter comme clé (ou modifier sa valeur) au dictionnaire et la valeur qu'on veut ajouter à la liste valeur de la clé (pas besoin de retourner, le dictionnaire\nest une structure immutable qui préserve la modification même si celle là a été effectuée dans une fonction)'''\ndef ajouter_elem_a_dict_type1(d, elem, val):\n if(elem in d):\n d[elem].append(val)\n else:\n d[elem] = [val]\n\n\n\n\n\n### Description de la fonction ajouter_elem_a_dict_type2 :\n'''C'est une fonction qui prend en entrée un dictionnaire d, un élément qu'on veut ajouter comme clé (ou modifier sa valeur) au dictionnaire et ajoute 1 à sa valeur'''\ndef ajouter_elem_a_dict_type2(d, elem):\n if(elem in d):\n d[elem] += 1\n else:\n d[elem] = 1\n\n\n\n\n\n### Description de la qualite 
:\n'''Cette fonction prend en entrée une cible (paire de coordonnées), un dictionnaire qui représente la liste d'adjacence du graphe de captation et un ensemble D qui contient les cibles qui sont k-couvertes.\nElle retourne la qualité d'une cible qui est représentée par le nombre de voisins qui ne sont pas k-couverts en comptant en plus la cible elle-même'''\ndef qualite(cible, adj_capt, D):\n qual = 0\n if(cible not in D): #Si la cible n'est pas couverte elle contribue à sa qualité\n qual += 1\n for voisin in adj_capt[cible]:\n if(voisin not in D):\n qual += 1\n return qual\n\n\n\n\n","repo_name":"Judafa/Projet_MetaHeuristiques","sub_path":"Algos/Algo_Tabou2.py","file_name":"Algo_Tabou2.py","file_ext":"py","file_size_in_byte":35861,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73001697973","text":"# coding:utf-8\n# @Time : 2022/6/14 18:53 \n# @Author : clf\n# @File : demo1.py.py \n# @Software: PyCharm\ndef calc(a,b):\n c=a+b\n return c\n\nresult=calc(10,20)\nprint(result)\n\nres=calc(b=10,a=20)\nprint(res)","repo_name":"ChenLiufeng/PythonPrograming","sub_path":"chap8/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2309682730","text":"from __future__ import print_function, absolute_import, division, unicode_literals, with_statement # Python 2 compatibility\n\nimport numpy as np\nfrom skimage.feature import register_translation\nfrom itertools import combinations\nfrom scipy.io import wavfile\nfrom egocom import audio\n\n\n# In[2]:\n\n\ndef gaussian_kernel(kernel_length=100, nsigma=3):\n '''Returns a 2D Gaussian kernel array.\n \n Parameters\n ----------\n kernel_length : int\n The length of the returned array.\n \n nsigma : int\n The # of standard deviations around the mean to compute the Gaussian shape.'''\n \n from scipy.stats import norm\n \n interval = (2*nsigma+1.)/(kernel_length)\n x = np.linspace(-nsigma-interval/2., nsigma+interval/2., kernel_length+1)\n kern1d = np.diff(norm.cdf(x))\n kernel_raw = np.sqrt(kern1d)\n kernel = kernel_raw/kernel_raw.sum()\n return kernel\n\ndef norm_signal(arr, samplerate = 44100, window_size = 0.1, also_return_divisor = False):\n '''Returns a locally-normalized array by dividing each point by a the \n sum of the points around it, with greater emphasis on the points \n nearest (using a Guassian convolution)\n \n Parameters\n ----------\n arr : np.array\n samplerate : int\n window_size : float (in seconds)\n \n Returns\n -------\n A Guassian convolution locally normalized version of the input arr'''\n \n kern = gaussian_kernel(kernel_length=int(samplerate * window_size), nsigma=3)\n local_power = np.convolve(arr, kern, 'same')\n resp = arr / local_power\n return resp, local_power if also_return_divisor else resp\n\n\n# In[3]:\n\n\ndef verify_alignments_for_three_wavs(\n shift_wav1_to_wav2,\n shift_wav2_to_wav3,\n shift_wav1_to_wav3,\n nearness_in_seconds = 0.1,\n samplerate = 44100,\n):\n '''Verifies that alignment results agree for three wavs\n e.g. 
shift from wav1 to wav2 + shift from wav2 to wav3\n should be near shift from wav1 to wav 3'''\n \n threshold = samplerate * nearness_in_seconds\n assert(abs(shift_wav1_to_wav2 + shift_wav2_to_wav3 - shift_wav1_to_wav3) < \n threshold)\n\n\n# In[4]:\n\n\ndef align_wavs(wav_list, samplerate = 44100, samples_at_end_to_ignore = 10):\n '''Automatically aligns a list of stereo (2-channel) wav np.arrays'''\n \n num_wavs = len(wav_list)\n # Avoid artifacts that may exist in the last samples_at_end_to_ignore samles\n length = min(len(w) for w in wav_list) - samples_at_end_to_ignore\n # Make all wav files the same length\n wavs = [abs(z)[:length] for z in wav_list]\n # Normalize locally\n wavs = [np.apply_along_axis(lambda x: norm_signal(x), axis = 0, arr = z) for z in wavs]\n # Normalize globally\n wavs = [audio.norm_center_clip(z) for z in wavs]\n \n shifts_relative_to_first_wav = [0]\n for w1, w2 in combinations(wavs, 2):\n # Compute the shifts for all combinations of left/right audio streams from both wav files\n combs = [(0, 0), (0, 1), (1, 0), (1, 1)]\n shifts = [-1 * register_translation(w1[:, a], w2[:, b])[0][0] for a,b in combs]\n shift = int(np.median(shifts))\n if len(shifts_relative_to_first_wav) < num_wavs:\n shifts_relative_to_first_wav.append(shift)\n elif num_wavs == 3:\n verify_alignments_for_three_wavs(\n shift_wav1_to_wav2 = shifts_relative_to_first_wav[-2],\n shift_wav2_to_wav3 = shift,\n shift_wav1_to_wav3 = shifts_relative_to_first_wav[-1],\n )\n alignment = np.array(shifts_relative_to_first_wav) - min(shifts_relative_to_first_wav)\n \n return alignment\n\n\n# In[5]:\n\n\ndef create_combined_wav_audio_sample(\n wav_list, \n samplerate = 44100,\n alignment = None,\n wfn = \"output.wav\", # WriteFileName\n nbits = 16, \n force_mono = False,\n):\n '''Combines the wav files after aligning \n so you can listen and see if they are aligned.'''\n \n if alignment is None:\n alignment = [0] * len(wav_list)\n # Align wav files\n aligned_wavs = [wav[alignment[i]:] for i, wav in enumerate(wav_list)]\n # Make all wav files normalized and the same length.\n duration = min([len(w) for w in aligned_wavs])\n y = sum([audio.norm_center_clip(z[:duration]) for z in aligned_wavs])\n write_wav(y, samplerate, wfn, nbits, force_mono)\n\n\n# In[6]:\n\n\ndef write_wav(\n wav, \n samplerate = 44100,\n wfn = \"output.wav\", # WriteFileName\n nbits = 16, \n force_mono = False,\n):\n '''Writes a wav file to directory wfn'''\n # Normalize and reduce to mono if needed -- required by Google Speech-to-Text\n y = audio.norm_center_clip(wav.sum(axis=1) if force_mono else wav)\n # Set bitsize of audio. 
\n y_int = ((2**(nbits - 1) - 1) * y).astype(eval(\"np.int\" + str(nbits)))\n # Write file to the WriteFileName specified by wfn\n wavfile.write(wfn, samplerate, y_int)\n\n","repo_name":"facebookresearch/EgoCom-Dataset","sub_path":"egocom/multi_array_alignment.py","file_name":"multi_array_alignment.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"21"} +{"seq_id":"36799668543","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport threading\nimport time\n\ncount = 0\n\n# Time定时器任务\ndef show_time():\n global count\n if count < 10:\n print(\"time:\", time.ctime())\n count += 1\n time_task()\n else:\n print(\"Done\")\n\n\ndef time_task():\n t = threading.Timer(10, show_time)\n t.start()\n\ntime_task()\n","repo_name":"diaoyuqiang/python","sub_path":"threading_/threading_Tm.py","file_name":"threading_Tm.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1800269443","text":"import random\n\nclass text:\n\n\tdef __init__(self, framework):\n\t\t\n\t\tself.fw = framework\n\t\tself.formattedString = \"\"\n\t\tself.specialchars = [\" \", \",\", \"@\", \"#\", \"$\", \"%\", \"^\", \"&\", \"=\",\"[\", \"]\", \"-\", \"{\", \"}\", \"\\\\\", \"/\", \"*\", \"!\", \".\", \"_\", \"(\", \")\", \"+\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n\t\tself.name = \"Text\"\n\t\tself.description = \"Library to provide generic text manipulation and encoding functions\"\n\n\n\t\"\"\"\n\t\twarpText(string)\n\n\t\trandomly capitalizes strings to attempt WAF evasion\n\t\"\"\"\n\tdef warpText(self, string):\n\n\t\tself.formattedString = \"\"\n\t\tstring = string.replace(\"\\n\", \"\")\n\t\twhile string[len(string)-1] == \" \": string = string[:-1]\n\t\twhile string[0] == \" \": string = string[1:]\n\n\t\tsentence = string.split(\" \")\n\n\t\tfor i in range(len(sentence)):\n\t\t\tspc_char = None\n\t\t\tunique = 0\n\t\t\twhile unique == 0:\n\t\t\t\tobfWord = \"\"\n\t\t\t\tfor x in range(len(sentence[i])):\n\t\t\t\t\tspc_char = 0\n\n\t\t\t\t\tif sentence[i][x] in self.specialchars:\n\t\t\t\t\t\tobfWord += sentence[i][x]\n\t\t\t\t\t\tspc_char = 1\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tcap = random.randint(0,1)\n\t\t\t\t\tif cap:\n\t\t\t\t\t\tobfWord += sentence[i][x].upper()\n\t\t\t\t\telse:\n\t\t\t\t\t\tobfWord += sentence[i][x].lower()\n\t\t\t\tif len(obfWord) == 1 or (obfWord != sentence[i].lower() and obfWord != sentence[i].upper()) or spc_char == 1:\n\t\t\t\t\tunique = 1\n\t\t\t\t\tself.formattedString += obfWord + \" \"\n\t\treturn self.formattedString[:-1]\n\t\"\"\"\n\tstrHex($string)\n\tconverts $string to its hex value\n\t\"\"\"\n\tdef strHex(self, string):\n\n\t\tself.formattedString = \"\"\n\t\tself.formattedString = string.encode(\"hex\").replace(\"\\n\", \"\")\n\n\t\treturn self.formattedString\n\n\t\"\"\"\n\thexStr($string)\n\tconverts the hex $string to its decimal equivalent, converts to char, and appends to a string.\n\t\"\"\"\n\tdef hexStr(self, string):\n\n\t\tself.formattedString = \"\"\n\t\tself.formattedString = string.decode(\"hex\").replace(\"\\n\", \"\")\n\n\t\treturn self.formattedString\n\n\t\t\"\"\"\n\tstrRot13($string)\n\tconverts $string to its rot13 value\n\t\"\"\"\n\tdef strRot13(self, string):\n\n\t\treturn self.rot13(string)\n\n\t\"\"\"\n\trot13Str($string)\n\tconverts the rot13 $string to plaintext\n\t\"\"\"\n\tdef rot13Str(self, string):\n\n\t\treturn 
self.rot13(string)\n\n\tdef rot13(self, string):\n\n\t\treturn string.encode(\"rot13\").replace(\"\\n\", \"\")\n\n\t\"\"\"\n\tstrBase64($string)\n\tconverts $string to its base64_encode value\n\t\"\"\"\n\tdef strBase64(self, string):\n\n\t\tself.formattedString = \"\"\n\t\tself.formattedString = string.encode(\"base64\").replace(\"\\n\", \"\")\n\n\t\treturn self.formattedString\n\n\t\"\"\"\n\tbase64Str($string)\n\tconverts the base64 $string to plaintext\n\t\"\"\"\n\tdef base64Str(self, string):\n\n\t\tself.formattedString = \"\"\n\t\tself.formattedString = string.decode(\"base64\").replace(\"\\n\", \"\")\n\n\t\treturn self.formattedString\n\n\n\t\"\"\"\n\tstrOct($string)\n\tconverts $string to its oct value\n\t\"\"\"\n\tdef strOct(self, string):\n\t\treturn False\n\tdef octStr(self, string):\n\t\treturn False\n\n\tdef sqlSpace(self, string):\n\n\t\tself.formattedString = \"\"\n\n\t\tself.formattedString = string.replace(\" \", \"/**/\").replace(\"+\", \"/**/\").replace(\"\\n\", \"\")\n\t\t\n\t\treturn self.formattedString\n","repo_name":"OpenWireSec/OpenWirePythonFramework","sub_path":"libs/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"4039732018","text":"A, B = map(int, input().split())\n\ndp = {0:0, 1:1}\n\ndef func(n):\n if n in dp: return dp[n]\n x = 2 ** (len(bin(n))-3)\n dp[n] = func(x-1) + func(n-x) + (n-x+1)\n return dp[n]\n\nprint(func(B) - func(A-1))","repo_name":"OdiSeU/BaekJoon","sub_path":"Baekjoon_Python/Baekjoon_Python/9527.py","file_name":"9527.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11013083575","text":"import os\nfrom geospark.core.enums import GridType, IndexType\nfrom geospark.core.formatMapper.shapefileParser import ShapefileReader\nfrom geospark.core.spatialOperator import JoinQuery\nfrom geospark.utils import KryoSerializer, GeoSparkKryoRegistrator\nfrom geospark.utils.adapter import Adapter\nfrom geospark.core import SpatialRDD\nfrom geospark.core.SpatialRDD import CircleRDD\nimport datetime\nfrom pyspark.sql import SparkSession\n# from geospark.register import upload_jars\nfrom geospark.register import GeoSparkRegistrator\nfrom colocation import write_to_file\n\n\ndef readWithDF(filename, spark, epsg3857):\n spatialDf = spark.read.format(\"csv\").option(\"delimiter\", \",\").option(\"header\", \"True\").load(filename)\n spatialDf.createOrReplaceTempView(\"spatialDf\")\n points = SpatialRDD.SpatialRDD()\n points.rawSpatialRDD = Adapter.toSpatialRdd(spark.sql(\n \"select ST_Point(cast(spatialDf.lat as Decimal(24, 14)), cast(spatialDf.lng as Decimal(24, 14))) from spatialDf\"))\n if epsg3857:\n points.CRSTransform(\"epsg:4326\", \"epsg:3857\")\n points.analyze()\n return points\n\n\ndef spatialJoinQueryUsingIndex(spark, poi_input_location, point_input_location, epsg3857, joinQueryPartitioningType,\n pointIndexType, radius):\n pois = readWithDF(poi_input_location, spark, epsg3857)\n pointRDD = readWithDF(point_input_location, spark, epsg3857)\n pointRDD.spatialPartitioning(joinQueryPartitioningType)\n pointRDD.buildIndex(pointIndexType, True)\n queryWindowRDD = CircleRDD(pois, radius)\n queryWindowRDD.analyze()\n queryWindowRDD.spatialPartitioning(pointRDD.getPartitioner())\n result = JoinQuery.SpatialJoinQuery(pointRDD, queryWindowRDD, True, False)\n write_to_file(result)\n\n\ndef distanceJoinQueryUsingIndex(spark, 
pointInputLocation, lineStringInputLocation, epsg3857, radius,\n joinQueryPartitioningType, pointIndexType):\n pointRDD = readWithDF(pointInputLocation, spark, epsg3857)\n lineStringRDD = ShapefileReader.readToGeometryRDD(spark.sparkContext, lineStringInputLocation)\n rawSpatialDf = Adapter.toDf(lineStringRDD, spark)\n rawSpatialDf.createOrReplaceTempView(\"rawSpatialDf\")\n rawSpatialDf = spark.sql(\"SELECT ST_GeomFromWKT(geometry) AS geom FROM rawSpatialDf\")\n rawSpatialDf.createOrReplaceTempView(\"rawSpatialDf\")\n rawSpatialDf.show()\n spatialDf = spark.sql(f\"SELECT ST_Buffer(geom, {radius}) AS buff FROM rawSpatialDf\")\n spatialDf.createOrReplaceTempView(\"spatialDf\")\n spatialDf.show()\n # queryWindowRDD = SpatialRDD.SpatialRDD()\n queryWindowRDD = Adapter.toSpatialRdd(spatialDf)\n if epsg3857:\n queryWindowRDD.CRSTransform(\"epsg:3857\", \"epsg:3857\")\n queryWindowRDD.analyze()\n pointRDD.spatialPartitioning(joinQueryPartitioningType)\n queryWindowRDD.spatialPartitioning(pointRDD.getPartitioner())\n pointRDD.buildIndex(pointIndexType, True)\n result = JoinQuery.SpatialJoinQuery(pointRDD, queryWindowRDD, True, False)\n write_to_file(result)\n\n\ndef distanceJoinQuerySQL(spark, pointInputLocation, lineStringInputLocation, start, end, radius, outputPath):\n pointDf, query = load_point(spark, pointInputLocation, \"point\", time=True, start=start, end=end)\n pointDf = spark.sql(query)\n pointDf.createOrReplaceTempView(\"pointDf\")\n pointDf.show()\n lineStringRDD = ShapefileReader.readToGeometryRDD(spark.sparkContext, lineStringInputLocation)\n lineStringDf = Adapter.toDf(lineStringRDD, spark)\n lineStringDf.createOrReplaceTempView(\"lineStringDf\")\n lineStringDf = spark.sql(\"SELECT *, ST_GeomFromWKT(geometry) AS geom FROM lineStringDf\")\n lineStringDf.createOrReplaceTempView(\"lineStringDf\")\n lineStringDf = spark.sql(f\"SELECT *, ST_Buffer(geom, {radius}) AS buff FROM lineStringDf\")\n lineStringDf.createOrReplaceTempView(\"bufferDf\")\n distJoin = spark.sql(\n \"SELECT ID, date, COUNT(*) as count FROM pointDf, bufferDf WHERE ST_Within(point, buff) GROUP BY ID, date ORDER BY date, ID\")\n distJoin.createOrReplaceTempView(\"distJoinDf\")\n distJoin.write.option(\"header\", \"True\").mode(\"overwrite\").csv(outputPath)\n\n\ndef distanceJoinPoints(spark, pointInputLocation, poiInputLocation, start, end, radius, outputPath):\n pointDf, query = load_point(spark, pointInputLocation, \"point\", time=True, start=start, end=end)\n pointDf = spark.sql(query)\n pointDf.createOrReplaceTempView(\"pointDf\")\n pointDf = spark.sql(\n \"SELECT point, date FROM pointDf WHERE ST_Within(point, ST_Transform(ST_GeomFromText('POLYGON((116.230633649 39.979627788,116.230633649 39.863608449,116.44120325 39.863608449,116.44120325 39.979627788,116.230633649 39.979627788))'),'epsg:4326', 'epsg:3857'))\")\n pointDf.show(truncate=False)\n poiDf, query = load_point(spark, poiInputLocation, \"poi\", time=False)\n poiDf = spark.sql(query)\n poiDf.createOrReplaceTempView(\"poiDf\")\n poiDf.show()\n distJoin = spark.sql(\n f\"SELECT date, COUNT(*) as Join_Count FROM pointDf, poiDf WHERE ST_Distance(point, poi) <= {radius} GROUP BY date ORDER BY date\")\n distJoin.createOrReplaceTempView(\"distJoinDf\")\n distJoin.write.option(\"header\", \"True\").mode(\"overwrite\").csv(outputPath)\n\n\ndef doubleJoin(spark, pointInputLocation, poiInputLocation, poiInputLocation2, start, end, radius, radius2, outputPath):\n poiDf, query = load_point(spark, poiInputLocation, \"poi\", time=False)\n poiDf = spark.sql(query)\n 
poiDf.createOrReplaceTempView(\"poiDf\")\n poiDf.show() # normal areas\n poiDf2, query = load_point(spark, poiInputLocation2, \"poi2\", time=False)\n poiDf2 = spark.sql(query)\n poiDf2.createOrReplaceTempView(\"poi2Df\")\n # distJoinPoi = spark.sql(f\"SELECT OBJECTID, poi, date FROM poiDf, poiDf2 WHERE ST_Distance(poi, poi2) <= {radius2}\")\n # distJoinPoi.createOrReplaceTempView(\"poiDf\")\n # distJoinPoi.show()\n distJoinPoi = spark.sql(f\"SELECT distinct OBJECTID FROM poiDf, poi2Df WHERE ST_Distance(poi, poi2) <= {radius2}\")\n distJoinPoi.show()\n print(distJoinPoi.count())\n # distJoinPoi = spark.sql(\"SELECT (UNIX_TIMESTAMP(poiDf.date,'yyyy-MM-dd')+864000)*1000 FROM poiDf\")\n # distJoinPoi.show()\n pointDf, query = load_point(spark, pointInputLocation, \"point\", time=True, start=start, end=end)\n pointDf = spark.sql(query)\n pointDf.createOrReplaceTempView(\"pointDf\")\n pointDf = spark.sql(\n \"SELECT point, date, sysTime FROM pointDf WHERE ST_Within(point, ST_Transform(ST_GeomFromText('POLYGON((116.230633649 39.979627788,116.230633649 39.863608449,116.44120325 39.863608449,116.44120325 39.979627788,116.230633649 39.979627788))'),'epsg:4326', 'epsg:3857'))\")\n pointDf.createOrReplaceTempView(\"pointDf\")\n pointDf.show(truncate=False)\n distJoin = spark.sql(\n f\"SELECT OBJECTID, pointDf.date, COUNT(*) as Join_Count FROM pointDf, poiDf WHERE ST_Distance(point, poi) <= {radius} AND sysTime>= (UNIX_TIMESTAMP(poiDf.date,'yyyy-MM-dd')-864000)*1000 AND sysTime <= (UNIX_TIMESTAMP(poiDf.date,'yyyy-MM-dd')+864000)*1000 GROUP BY OBJECTID, pointDf.date ORDER BY pointDf.date\")\n distJoin.createOrReplaceTempView(\"distJoinDf\")\n distJoin.write.option(\"header\", \"True\").mode(\"overwrite\").csv(outputPath)\n\n\ndef linestringBuffer(spark, lineStringInputLocation, radius, outputPath):\n lineStringRDD = ShapefileReader.readToGeometryRDD(spark.sparkContext, lineStringInputLocation)\n rawSpatialDf = Adapter.toDf(lineStringRDD, spark)\n rawSpatialDf.createOrReplaceTempView(\"rawSpatialDf\")\n query = \"SELECT ST_GeomFromWKT(geometry) AS geom FROM rawSpatialDf\"\n rawSpatialDf = spark.sql(query)\n rawSpatialDf.createOrReplaceTempView(\"rawSpatialDf\")\n spatialDf = spark.sql(f\"SELECT ST_Buffer(geom, {radius}) AS buff, geom FROM rawSpatialDf\")\n spatialDf.createOrReplaceTempView(\"spatialDf\")\n spatialDf.show()\n # spatialDf.rdd.map(lambda x: [str(x[0]), str(x[1])]).saveAsTextFile(outputPath + analysis + \"Result\")\n spatialDf.write.mode(\"overwrite\").json(outputPath + \"Result\")\n\n\ndef pointBuffer(spark, filename, lonOffset, latOffset, outputPath):\n spatialDf = spark.read.format(\"csv\").option(\"delimiter\", \",\").option(\"header\", \"False\").load(filename)\n spatialDf.createOrReplaceTempView(\"spatialDf\")\n spatialDf = spark.sql(\n f\"select ST_Point(cast(spatialDf._c{latOffset} as Decimal(24, 18)), cast(spatialDf._c{lonOffset} as Decimal(24, 18))) as geometry, spatialDf._c2 as name from spatialDf\")\n spatialDf.createOrReplaceTempView(\"spatialDf\")\n spatialDf = spark.sql(\n \"SELECT ST_Transform(geometry, 'epsg:4326','epsg:3857') AS buff, geometry, name FROM spatialDf\")\n spatialDf.createOrReplaceTempView(\"spatialDf\")\n spatialDf.rdd.map(lambda x: [str(x[0]), str(x[1])]).saveAsTextFile(outputPath + \"Result\")\n # spatialDf.rdd.sortBy(lambda x: str(x)).saveAsTextFile(outputPath + analysis + \"Result\")\n\n\ndef load_point(spark, inputLocation, name, time, start=None, end=None):\n pointDf = spark.read.format(\"csv\").option(\"delimiter1\", \",\").option(\"header\", 
\"True\").load(inputLocation)\n pointDf.createOrReplaceTempView(name + \"Df\")\n query = \"SELECT sysTime, ST_Transform(ST_Point(cast(lng AS Decimal(24, 14)), cast(lat AS Decimal(24, 14))), \" \\\n f\"'epsg:4326', 'epsg:3857') AS {name}\"\n if time:\n query += \",from_unixtime(sysTime/1000,'yyyy-MM-dd') AS date, from_unixtime(sysTime/1000,'HH:mm:ss') AS time \"\n query += f\"FROM {name + 'Df'} \"\n if start:\n query += f\" WHERE sysTime > UNIX_TIMESTAMP('{start.strftime('%Y-%m-%d')}','yyyy-MM-dd') * 1000 AND sysTime < UNIX_TIMESTAMP('{end.strftime('%Y-%m-%d')}','yyyy-MM-dd') * 1000\"\n return pointDf, query\n\n\ndef join_labeled(spark, pointInputLocation, poiInputLocation, start, end, outputPath):\n poiRDD = ShapefileReader.readToGeometryRDD(spark.sparkContext, poiInputLocation)\n poiDf = Adapter.toDf(poiRDD, spark)\n poiDf.createOrReplaceTempView(\"poiDf\")\n poiDf = spark.sql(\"SELECT ST_GeomFromWKT(geometry) AS geom FROM poiDf\")\n poiDf.createOrReplaceTempView(\"poiDf\")\n pointDf, query = load_point(spark, pointInputLocation, \"point\", time=True, start=start, end=end)\n pointDf = spark.sql(query)\n pointDf.createOrReplaceTempView(\"pointDf\")\n distJoin = spark.sql(\n f\"SELECT date, COUNT(*) as Join_Count FROM pointDf, poiDf WHERE HOUR(time)=8 AND ST_Within(point, geom) GROUP BY date ORDER BY date\")\n distJoin.write.option(\"header\", \"True\").mode(\"overwrite\").csv(outputPath)\n","repo_name":"XinweiChai/bike_analysis","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7980474593","text":"#เขียนโปรแกรมรับจำนวนเต็มบวก 1 จำนวน จากนั้นให้แสดงผลลัพธ์เป็นจำนวนเต็มดังกล่าวที่เขียนอยู่ในรูปของตัวเลขฐาน สอง\n\n#ตัวอย่างการทำงาน 1 \n\n#Enter number: 19\n#10011\n\n#ตัวอย่างการทำงาน 2 \n\n#Enter number: 6\n#110\n\nconvert = []\nx = int(input('Enter number: '))\nwhile x != 0:\n for i in range(100000):\n n = 2 * i\n if x - n == 0:\n convert.append(0)\n x = int(n/2)\n elif x - n == 1:\n convert.append(1)\n x = int(n/2)\n elif x == 0 or x== 1:\n break\nd = int(len(convert))-1\nfor _ in range(d+1):\n print(convert[d], end=\"\")\n d = d - 1\n \n ","repo_name":"ArnonGot/lab_python","sub_path":"example9.py","file_name":"example9.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21548304485","text":"#!/usr/bin/env python3\n\"\"\"Run GARFfield to get error\nFunctions:\n run() -> error\n\n get_error(path) function: define the error by your own\n\"\"\"\nimport os\nimport re\nimport subprocess\nfrom drlff.conf import env, files_input, files_output\n\n\nfor environ in ['PATH', 'LD_LIBRARY_PATH']:\n if environ in os.environ:\n env['PATH'] = ':'.join((os.environ['PATH'], env['PATH']))\n\n\ndef get_error(path):\n \"\"\"Define your own error function by read and parse file in \"path\"\n \"\"\"\n with open(path, 'r') as f:\n data = f.readlines()\n\n return float(re.findall('[\\d\\.]+', data[-1])[0])\n\n\ndef run():\n \"\"\"Run GARFfield and get error\n example:\n err = run()\n \"\"\"\n if not os.path.isdir(files_output['log']):\n if os.path.exists(files_output['log']):\n os.remove(files_output['log'])\n os.mkdir(files_output['log'])\n\n os.chdir(files_input['dir'])\n pipe = subprocess.Popen(\n ['garffield',\n files_input['geo'],\n files_input['ffield'],\n files_input['trainset'],\n files_input['params'],\n '-t', '1',\n '-p', '2'],\n env=env,\n 
stdout=subprocess.PIPE\n )\n out, err = pipe.communicate()\n pipe.terminate()\n del pipe\n with open(os.path.join(files_output['log'], 'drlff.ga.log'), 'a') as f:\n f.write(out.decode('utf-8'))\n return get_error(os.path.join(files_input['dir'], 'trainset.err.initial'))","repo_name":"WinterFu/Drlff_Scripts","sub_path":"drlff/environment/ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21184239972","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\n\n\ndef main_menu_keyboard() -> InlineKeyboardMarkup:\n \"\"\"\n Клавиатура для главного меню бота.\n\n :return: объект клавиатуры\n :rtype: InlineKeyboardMarkup\n \"\"\"\n\n keyboard = InlineKeyboardMarkup()\n\n make_request = InlineKeyboardButton(text=\"📛Оставить заявку\", callback_data='make_request')\n contact_us = InlineKeyboardButton(text=\"📞Связаться\", callback_data='contact_us')\n settings = InlineKeyboardButton(text=\"⚙Настройки\", callback_data='settings')\n more_contacts = InlineKeyboardButton(text='☎Полезные контакты', callback_data='more_contacts')\n\n keyboard.add(make_request, contact_us)\n keyboard.add(settings)\n keyboard.add(more_contacts)\n\n return keyboard\n\n\ndef request_menu_keyboard() -> InlineKeyboardMarkup:\n \"\"\"\n Клавиатура для меню заявок бота.\n\n :return: объект клавиатуры\n :rtype: InlineKeyboardMarkup\n \"\"\"\n\n keyboard = InlineKeyboardMarkup()\n\n send_request = InlineKeyboardButton(text=\"📛Оставить заявку\", callback_data='send_request')\n send_idea = InlineKeyboardButton(text=\"💡Поделиться предложением\", callback_data='send_idea')\n back = InlineKeyboardButton(text=\"🔙Назад\", callback_data='back')\n\n keyboard.add(send_request, send_idea)\n keyboard.add(back)\n\n return keyboard\n\n\ndef contacts_menu_keyboard() -> InlineKeyboardMarkup:\n \"\"\"\n Клавиатура для меню обратной связи бота.\n\n :return: объект клавиатуры\n :rtype: InlineKeyboardMarkup\n \"\"\"\n\n keyboard = InlineKeyboardMarkup()\n\n call_me = InlineKeyboardButton(text=\"📞Перезвоните мне\", callback_data='call_me')\n text_me = InlineKeyboardButton(text=\"📞Свяжитесь со мной в чат-боте\", callback_data='text_me')\n back = InlineKeyboardButton(text=\"🔙Назад\", callback_data='back')\n\n keyboard.add(call_me)\n keyboard.add(text_me)\n keyboard.add(back)\n\n return keyboard\n\n\ndef settings_menu_keyboard() -> InlineKeyboardMarkup:\n \"\"\"\n Клавиатура для меню настроек бота.\n\n :return: объект клавиатуры\n :rtype: InlineKeyboardMarkup\n \"\"\"\n\n keyboard = InlineKeyboardMarkup()\n\n change_name = InlineKeyboardButton(text=\"🛠️Поменять имя\", callback_data='change_name')\n change_phone = InlineKeyboardButton(text=\"🛠️Сменить номер\", callback_data='change_phone')\n back = InlineKeyboardButton(text=\"🔙Назад\", callback_data='back')\n\n keyboard.add(change_name, change_phone)\n keyboard.add(back)\n\n return keyboard\n","repo_name":"AlexSolokhin/housing_bot","sub_path":"keyboards/menu_keyboards.py","file_name":"menu_keyboards.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34271496083","text":"\nimport pandas as pd\nimport numpy as np\nimport Bio\nimport os\nfrom Bio import Entrez, SeqIO\nimport itertools\nimport argparse\nimport math\nimport torch\nfrom torch import nn\nimport numpy as np\nimport pandas as pd\nimport h5py\n\n\n\n#Defining a SNP class to perform simple LD filtering 
duties\nclass SNP:\n \n def __init__(self,rsid,position,chromosome):\n self.rsid = rsid\n self.position = position\n self.chr = chromosome\n Entrez.email = \"pradluzog@gmail.com\"\n Entrez.api_key = \"98ad62666b4bd2dc831f1824727d74d67c08\"\n \n\n \n def check_ld_snps(self,dataset,window = 1000):\n start_position = self.position - window + 1\n end_position = self.position + window\n dataset = dataset[dataset['Chromosome'] == self.chr]\n def extract_neighbour_snps(start_position, end_position, dataset):\n neighbour_snps = []\n for index,row in dataset.iterrows():\n if start_position <= dataset.loc[index,'Position'] <= end_position:\n neighbour_snps.append(dataset.loc[index,'MarkerName'])\n else:\n continue\n return neighbour_snps\n \n self.snps_in_window = extract_neighbour_snps(start_position,end_position,dataset)\n return self.snps_in_window\n \n def obtain_snp_sequence(self,window = 1000):\n start_position = self.position - window +1\n end_position = self.position + window\n if int(self.chr) < 10:\n id_chr = \"\".join([\"NC_00000\",str(self.chr)])\n else:\n id_chr = \"\".join([\"NC_0000\",str(self.chr)])\n\n handle = Entrez.efetch(db=\"nucleotide\",\n id = id_chr,\n rettype = \"fasta\",\n strand = 1,\n seq_start = start_position,\n seq_stop = end_position)\n record = SeqIO.read(handle,\"fasta\")\n self.snp_sequence = str(record.seq)\n return self.snp_sequence\n \n def obtain_all_comb_seq(self,dataset, window = 1000):\n \n def all_snp_combinations(a):\n combinations = []\n for k in range(0,len(a)):\n t = list(itertools.combinations(a,k+1))\n combinations.extend(t)\n return combinations\n \n self.combinations = all_snp_combinations(self.snps_in_window)\n comb_names = ['_'.join(x) for x in self.combinations if len(x)> 0]\n comb_names.append('_'.join(['Ref',self.rsid]))\n combination_dataset = dataset[dataset['MarkerName'].isin(self.snps_in_window)]\n sequences = []\n \n for comb in self.combinations:\n seq_to_change = self.snp_sequence\n start_position = self.position - window + 1\n end_position = self.position + window\n for k in range(0,len(comb)):\n idx = combination_dataset['MarkerName'] == comb[k]\n pos = combination_dataset.loc[idx,'Position']\n allele = str(combination_dataset.loc[idx,'Non_Effect_allele'].values[0])\n net_pos = int(pos) - int(start_position)\n seq_to_change = seq_to_change[:net_pos-1] + allele + seq_to_change[net_pos:]\n sequences.append(seq_to_change)\n sequences.append(self.snp_sequence)\n sequences_named = dict(zip(comb_names,sequences))\n return sequences_named\n \n \n def seq_combination(self,dataset,window = 1000):\n self.check_ld_snps(dataset,window)\n self.obtain_snp_sequence()\n self.combination_seq = self.obtain_all_comb_seq(dataset,window)\n return self.combination_seq\n \n \n def __str__(self):\n return \"The SNP in object is \"+self.rsid\n \n\nclass LambdaBase(nn.Sequential):\n def __init__(self, fn, *args):\n super(LambdaBase, self).__init__(*args)\n self.lambda_func = fn\n\n def forward_prepare(self, input):\n output = []\n for module in self._modules.values():\n output.append(module(input))\n return output if output else input\n\nclass Lambda(LambdaBase):\n def forward(self, input):\n return self.lambda_func(self.forward_prepare(input))\n\nclass Beluga(nn.Module):\n def __init__(self):\n super(Beluga, self).__init__()\n self.model = nn.Sequential(\n nn.Sequential(\n nn.Conv2d(4,320,(1, 8)),\n nn.ReLU(),\n nn.Conv2d(320,320,(1, 8)),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.MaxPool2d((1, 4),(1, 4)),\n nn.Conv2d(320,480,(1, 8)),\n nn.ReLU(),\n 
nn.Conv2d(480,480,(1, 8)),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.MaxPool2d((1, 4),(1, 4)),\n nn.Conv2d(480,640,(1, 8)),\n nn.ReLU(),\n nn.Conv2d(640,640,(1, 8)),\n nn.ReLU(),\n ),\n nn.Sequential(\n nn.Dropout(0.5),\n Lambda(lambda x: x.view(x.size(0),-1)),\n nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(67840,2003)),\n nn.ReLU(),\n nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2003,2002)),\n ),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n return self.model(x)\n\n \n \n ","repo_name":"PradoVarathan/MultiSpecto","sub_path":"Multi_specto_class.py","file_name":"Multi_specto_class.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36690815003","text":"# API 참조 \n# https://docs.aws.amazon.com/opensearch-service/latest/developerguide/custom-packages.html#custom-packages-updating\n# https://docs.aws.amazon.com/opensearch-service/latest/developerguide/configuration-api.html#configuration-api-actions-listpackagesfordomain\n# https://boto3.amazonaws.com/v1/documentation/api/1.18.51/reference/services/opensearch.html\nfrom requests_aws4auth import AWS4Auth\nimport boto3\nimport requests\nimport time\nimport sys\n\nregion = 'ap-northeast-2' # e.g. us-west-1\nservice = 'es'\nsrc_domain_name = 'dev-search-es-710'\ndest_domain_name = 'dev-search-es-250'\nsrc_host = 'https://vpc-dev-search-es-710-oam5efg2l5nia7bqh2yvbneklu.ap-northeast-2.es.amazonaws.com/' # include https:// and trailing /\ndest_host = 'https://vpc-dev-search-es-250-crdyurhlzxphmrjdfmtz7m23nu.ap-northeast-2.es.amazonaws.com/' # include https:// and trailing /\n\n# Use assume-role\nsession = boto3.Session(profile_name=\"default\")\nsts = session.client(\"sts\")\nresponse = sts.assume_role(\n RoleArn=\"arn:aws:iam::175979101058:role/dev-srch-opensearch-access-role\",\n RoleSessionName=\"es-access-session\"\n)\nawsauth = AWS4Auth(response['Credentials']['AccessKeyId'], response['Credentials']['SecretAccessKey'], region, service, session_token=response['Credentials']['SessionToken'])\n\n# Use local credential\n#credentials = boto3.Session().get_credentials()\n#awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\n\ndef es_cat_indices(host, prefix=\"\"):\n path = '_cat/indices'\n if len(prefix) > 0:\n path = f'_cat/indices/{prefix}*'\n \n url = host + path\n response = requests.get(url, auth=awsauth)\n return response.text\n\nresult = es_cat_indices(src_host)\nprint(result)\n\n","repo_name":"thyun/search-test","sub_path":"aws-elasticsearch/es.py","file_name":"es.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21306812607","text":"import logging\n#logging模式\n# logging.basicConfig(level=logging.DEBUG, \n# format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', \n# datefmt='%a, %d %b %Y %H:%M:%S', \n# filename='C:/Users/weiyji/Desktop/file/test.log', \n# filemode='a') \n# logging.debug('debug message') \n# logging.info('info message') \n# logging.warning('warning message') \n# logging.error('error message') \n# logging.critical('critical message')\n\n#logger模式\nlogger = logging.getLogger()\nfh = logging.FileHandler('test.log')\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - 
%(message)s')\nfh.setFormatter(formatter)#文件输出格式\nch.setFormatter(formatter)#屏幕输出格式\nlogger.addHandler(fh)\nlogger.addHandler(ch)\nlogger.setLevel(logging.DEBUG)\nlogger.debug('logger debug message')\nlogger.info('logger info message')\nlogger.warning('logger warning message')\nlogger.error('logger error message')\nlogger.critical('logger critical message')","repo_name":"virgo-deity/python","sub_path":"test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34728683102","text":"#coding=utf-8\n\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\nimport tensorflow as tf\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport mnist\nimport picTransform as pt\n\nimageForTest= pt.tranform(\"one.png\")\n\ndef placeholder_inputs(batch_size):\n images_placeholder = tf.placeholder(tf.float32, shape=(batch_size, mnist.IMAGE_PIXELS))\n labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))\n return images_placeholder, labels_placeholder\n\n\n\ndef fill_feed_dict(data_set, images_pl, labels_pl):\n images_feed, labels_feed = data_set.next_batch(100, False)\n feed_dict = {\n images_pl: images_feed,\n labels_pl: labels_feed,\n }\n return feed_dict\n\ndef run_training():\n data_sets = read_data_sets('/tmp/tensorflow/mnist/input_data', False)\n with tf.Graph().as_default():\n\n images_placeholder, labels_placeholder = placeholder_inputs(100) # 每100张送进去计算一次所以这里的向量维度100\n\n logits = mnist.inference(images_placeholder, 128, 32)\n\n loss = mnist.loss(logits, labels_placeholder)\n\n train_op = mnist.training(loss, 0.01)\n\n summary = tf.summary.merge_all()\n\n saver = tf.train.Saver()\n\n init = tf.global_variables_initializer() #前面都是定义变量,这里对该图中的变量进行初始化\n\n sess = tf.Session()\n\n summary_writer = tf.summary.FileWriter(\"logs/\", sess.graph)\n\n sess.run(init)\n\n\n for step in xrange(2000): # 每一步取100张,一共2000部,取20万张\n\n feed_dict = fill_feed_dict(data_sets.train,\n images_placeholder,\n labels_placeholder)\n\n _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)\n #sess.run()函数对loss进行计算,得到lossValue用于查看拟合度是否上身,主要的计算在于执行train_op,返回值_表示匿名,既不关心(应为train_op只是一个计算过程,所以没有返回实体,所以打印并没有区别)\n if step % 100 == 0:\n print(loss_value)\n summary_str = sess.run(summary, feed_dict=feed_dict)\n summary_writer.add_summary(summary_str, step)\n summary_writer.flush()\n\n\n # print(train_op)\n save_path=saver.save(sess,\"myMnistNet/save_net.ckpt\")\n print(save_path)\n\n\ndef main(_):\n\n run_training()\n\n\nif __name__ == '__main__':\n\n tf.app.run(main=main)","repo_name":"ivanxia1988/tensorflow-exercise","sub_path":"trainProcess.py","file_name":"trainProcess.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39818359645","text":"from tkinter import *\r\nroot=Tk()\r\nroot.geometry(\"1000x1000\")\r\nlabel=Label(root, text='Welcome')\r\nlabel.pack()\r\nlabel_vest=Label(root).pack()\r\n\r\n\r\ncanvas = Canvas(root, width=1000, height=1000)\r\ncanvas.pack()\r\n\r\ndef get_mouse_position(event):\r\n x, y = event.x, event.y\r\n print('{}, {}'.format(x, y))\r\ncanvas.bind(\"\", get_mouse_position)\r\n\r\n","repo_name":"alexandra7578/intersectie","sub_path":"design.py","file_name":"design.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"10223165256","text":"from abc import ABC, abstractmethod\nfrom typing import TypeVar, Generic, TYPE_CHECKING\n\nfrom . import cfg\n\nif TYPE_CHECKING:\n from . import Node\n\nT = TypeVar(\"T\")\n\n\nclass CommandBase(cfg.BaseConfig, Generic[T], ABC):\n\n __cmd_name__ = \"__cmd_command_base__\"\n __cmd_autoname__ = \"module\"\n\n def __init_subclass__(cls, **kwargs):\n if \"__cmd_name__\" not in vars(cls):\n if cls.__cmd_autoname__ == \"class\":\n cls.__cmd_name__ = cls.__name__\n elif cls.__cmd_autoname__ == \"module\":\n cls.__cmd_name__ = f'{cls.__module__}.{cls.__name__}'\n else:\n raise ValueError(\"__cmd_autoname__ should be 'class' or 'module'.\"\n \" If you intend to specify a command name, you should use __cmd_name__\")\n\n __cmd_name__ = getattr(cls, \"__cmd_name__\")\n __cmd_name_option__ = cfg.Lazy(lambda c: __cmd_name__, name=\"__cmd_name__\")\n __cmd_name_option__.__set_name__(cls, \"__cmd_name_option__\")\n cls.__cmd_name_option__ = __cmd_name_option__\n\n @abstractmethod\n def execute(self, ctx: \"Node\") -> T:\n \"\"\"\n Execute this command.\n\n :param ctx: A Node that received this command.\n :return: result of this execution. :py:data:`~.NoResponse`\n represents this execution doesn't have response.\n \"\"\"\n","repo_name":"susautw/rin-curium","sub_path":"src/rin/curium/command_base.py","file_name":"command_base.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18740563190","text":"import time\nfrom detectron2.evaluation.evaluator import DatasetEvaluator\nimport detectron2.utils.comm as comm\nimport itertools\nfrom collections import OrderedDict \n\nimport numpy as np \n\nclass GPUTimeEvaluator(DatasetEvaluator):\n def __init__(self, distributed, unit, out_file=None):\n self.distributed = distributed\n self.all_time = []\n self.unit = unit\n self.out_file = out_file\n if unit not in {'minisecond', 'second'}:\n raise NotImplementedError('Unsupported time unit %s'%unit)\n self.reset()\n \n def reset(self):\n self.all_time = []\n\n def process(self, inputs, outputs):\n for output in outputs:\n if 'time' in output.keys():\n self.all_time.append(output['time'])\n return\n\n def evaluate(self):\n if self.distributed:\n comm.synchronize()\n all_time = comm.gather(self.all_time, dst=0)\n all_time = list(itertools.chain(*all_time))\n \n if not comm.is_main_process():\n return {}\n else:\n all_time = self.all_time\n\n if len(all_time) == 0:\n return {'GPU_Speed': 0}\n \n all_time = np.array(all_time)\n \n speeds = 1. 
/ all_time\n if self.unit == 'minisecond':\n speeds *= 1000\n\n mean_speed = speeds.mean() \n std_speed = speeds.std()\n max_speed = speeds.max()\n min_speed = speeds.min()\n mid_speed = np.median(speeds)\n\n if self.out_file is not None:\n f = open(self.out_file, 'a')\n curr_time = time.strftime('%Y/%m/%d,%H:%M:%S', time.localtime())\n f.write('%s\\t%.2f\\n'%(curr_time, mean_speed))\n f.close()\n\n ret_dict = {'Mean_FPS': mean_speed, 'Std_FPS': std_speed, 'Max_FPS': max_speed, 'Min_FPS': min_speed, 'Mid_FPS': mid_speed} \n\n return {'GPU_Speed': ret_dict}","repo_name":"ChenhongyiYang/QueryDet-PyTorch","sub_path":"utils/time_evaluator.py","file_name":"time_evaluator.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":371,"dataset":"github-code","pt":"21"} +{"seq_id":"25630549045","text":"import ealib\nimport numpy as np\nimport datetime\n\n\ninit = ealib.CategoricalUniformInitializer()\nfoslearner = ealib.CategoricalLinkageTree(ealib.NMI(), ealib.FoSOrdering.AsIs)\ncriterion = ealib.ObjectiveAcceptanceCriterion()\n\narchive = ealib.BruteforceArchive([0])\ngomea = ealib.GOMEA(16, init, foslearner, criterion, archive)\nstepper = ealib.TerminationStepper((lambda : gomea), 10)\nproblem = ealib.OneMax(100)\nproblem_limited = ealib.Limiter(problem, 20000, datetime.timedelta(seconds=1))\nproblem_monitored = ealib.ElitistMonitor(problem_limited, criterion)\nf = ealib.SimpleConfigurator(problem_monitored, stepper, 42)\nf.run()\n\npop = f.getPopulation()\nprint(problem_limited.get_time_spent_ms())\nprint(problem_limited.get_num_evaluations())\nelitist = archive.get_archived()[0]\nprint(f\"elitist: {elitist}\")\nprint(np.array(pop.getData(ealib.GENOTYPECATEGORICAL, elitist), copy=False))\nprint(np.array(pop.getData(ealib.OBJECTIVE, elitist), copy=False))\n\n\n","repo_name":"8uurg/Impact-of-Asynchrony-on-MBEAs","sub_path":"EALib/python_examples/example_single_pop.py","file_name":"example_single_pop.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13360014431","text":"from refined.evaluation.evaluation import eval_all\nfrom refined.inference.processor import Refined\n\nrefined = Refined.from_pretrained(model_name='aida_model',\n entity_set='wikipedia',\n use_precomputed_descriptions=True)\nprint('EL results (with model fine-tuned on AIDA)')\neval_all(refined=refined, el=True, filter_nil_spans=False)\n\nrefined = Refined.from_pretrained(model_name='wikipedia_model',\n entity_set='wikipedia',\n use_precomputed_descriptions=True)\nprint('ED results (with model not fine-tuned on AIDA)')\neval_all(refined=refined, el=False)\n","repo_name":"amazon-science/ReFinED","sub_path":"replicate_results.py","file_name":"replicate_results.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"21"} +{"seq_id":"20599931541","text":"#!/usr/bin/env python3\n\n# Author: Jeffrey Grover\n# Purpose: Reverse complement a fastq or fasta file\n# per-base coverage output\n# Created: 2019-07-29\n\nfrom argparse import ArgumentParser\nimport gzip\n\n\ndef magic_opener(input_file):\n if input_file.endswith('gz'):\n return gzip.open(input_file, 'rt')\n else:\n return open(input_file, 'r')\n\n\ndef fq_iter(input_file):\n n = 0\n fq_record = []\n with magic_opener(input_file) as input_handle:\n for line in input_handle:\n n += 1\n fq_record.append(line.strip())\n if n == 4:\n yield fq_record\n n 
= 0\n fq_record = []\n\n\ndef rev_comp(fq_iter):\n comp_table = str.maketrans('ATCG', 'TAGC')\n for record in fq_iter:\n seq_id = record[0]\n rev_comp_seq = record[1].translate(comp_table)[::-1]\n separator = record[2]\n qual = record[3][::-1]\n print(seq_id, rev_comp_seq, separator, qual, sep='\\n')\n\n\n# ArgumentParser\n\ndef get_args():\n parser = ArgumentParser(\n description='Returns the reverse complement of a fastq file to stdout.')\n parser.add_argument('fastq',\n help='Input .fastq, may be gzipped',\n metavar='FILE.fastq(.gz)')\n return parser.parse_args()\n\n\n# Entry point\n\ndef main(args):\n rev_comp(fq_iter(args.fastq))\n\n\nif __name__ == '__main__':\n main(get_args())\n","repo_name":"groverj3/genomics_tools","sub_path":"fastq_reverse_comp.py","file_name":"fastq_reverse_comp.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28666448071","text":"import os\nimport time\nimport asyncio\nimport pytz\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\nfrom discord import User, TextChannel, Role\nfrom datetime import datetime\n\nload_dotenv()\n\nCHANNEL_ID = int(os.getenv('CHANNEL_ID'))\nGUILD_ID = int(os.getenv('GUILD_ID'))\nROLE_ID = int(os.getenv('ROLE_ID'))\n\nclient = commands.Bot(command_prefix=\"$\")\nserver_timezone = pytz.timezone(\"Asia/Jakarta\")\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n await sendReminder()\n \n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n await client.process_commands(message)\n \n@client.command()\nasync def ping(ctx, channel: TextChannel, user: User, *, message=\"\"):\n await channel.send(f\"{ctx.author.mention}: {message} {user.mention}\")\n\nasync def sendReminder():\n while(True):\n if (datetime.now(server_timezone).hour == 9 or datetime.now(server_timezone).hour == 18):\n channel = client.get_channel(CHANNEL_ID)\n guild = client.get_guild(GUILD_ID)\n if guild:\n role = guild.get_role(ROLE_ID)\n await channel.send(f\"https://www.hoyolab.com/genshin/ Daily HoyoLab {role.mention}\")\n else:\n return\n await asyncio.sleep(3600)\n \n\nclient.run(os.getenv('TOKEN'))","repo_name":"kakioshe/chronos-discord","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32778476312","text":"#Auth: Prajwal_anagani\n#LawJarp_a\n\n#import required libraries\nimport requests\nimport bs4\nimport sys\nimport math\n\n#Set some global variables\n\n#Can change so to retrieve more of less posts\nno_of_posts = 5\n\n#Put your own usr agent\nusr = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36\"\n\nheaders = {'User-agent':usr}\nprint(\"Got you user-agent: \",usr)\n\n#Function used to get posts(duh)\ndef get_posts(t, top_no, headers):\n\n\t#Download the subreddit info\n data_req = requests.get(\"https://www.reddit.com/\"+t+\"/\", headers = headers)\n\n #Check if subreddit is valid\n if not (data_req.status_code == requests.codes.ok):\n \tprint(\"Invalid subreddit. 
Exiting program..\")\n \tsys.exit(0)\n\n\t#Parse it to bs4 format\n data = bs4.BeautifulSoup(data_req.text,'html.parser')\n\n\t#Get the no_of_posts required\n tops = data.find_all(class_=\"_1poyrkZ7g36PawDueRza-J _11R7M_VOgKO1RJyRSRErT3\")[0:top_no]\n\n\t#\n d = {}\n\t#Enumerate and handle the different types of posts\n for i, l in enumerate(tops):\n a = l.find(class_=\"_eYtD2XCVieq6emjKBH3m\").text\n\n b = \"UNKNOWN TYPE\"\n\n try:\n\t\t\t#Image post\n b = l.find(alt='Post image')['src']\n except:\n try:\n\t\t\t\t#Gif Post\n b = l.find(\"video\").find(\"source\")['src']\n except:\n try:\n\t\t\t\t\t#Imgur link\n b = l.find(class_ = \"styled-outbound-link\")['href']\n if(b[0]==\"/\"):\n b = \"https://www.reddit.com\"+b\n\n except:\n try:\n\t\t\t\t\t\t#Link to discussion\n b = \"https://www.reddit.com\"+l.find(class_ = \"SQnoC3ObvgnGjWt90zD9Z _2INHSNB8V5eaWp4P0rY_mE\")['href']\n except:\n pass\n\n\t\t#Go to the comments page\n c_url = l.find(\"a\")['href']\n\n\t\t#Get web info and parse it to bs4 format\n data_req = requests.get(c_url, headers = headers)\n data = bs4.BeautifulSoup(data_req.text,'html.parser')\n\n\t\t#Find and gather the top 5 comments\n k = data.find_all(class_ = \"_3cjCphgls6DH-irkVaA0GM\")[0:5]\n\n\t\t#Get the text part from the HTML tags\n k = list(map(lambda x: x.text, k))\n\n\t\t#Return dictionary with the format {Title_name:(Link,list of top 5 comments)}\n d[a] = (b,k)\n\n\t\t#print(\"%d %\" %(math.floor((((i+1)/top_no))*100)))\n\n\n return d\n\n#Get and display\nsr = input(\"Enter subreddit (format: 'r/sub'): \")\ntp = get_posts(sr, 5, headers)\nfor i in tp.keys():\n print(\"Title: \",i)\n print()\n print(\"Image/Gif/discussion: \", tp[i][0])\n print()\n print(\"Top 5 comments:\")\n for k in tp[i][1]:\n print(\"-->\",k)\n print()\n print()\n","repo_name":"LawJarp-A/Project_I","sub_path":"Web_Scraping/reddit_script.py","file_name":"reddit_script.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75078383","text":"import whisper\nimport sys\nimport torch\n\nif len(sys.argv) < 2:\n print(\"Please input an audio file path.\")\n sys.exit()\nmodel = whisper.load_model(\"tiny\", device=\"cpu\")\nwhile True:\n finetune = input(\"Do you use finetuning model? (yes/no):\")\n if finetune == \"no\":\n break\n if finetune == \"yes\":\n model_path = \"ft_whisper_best.pth\"\n model.load_state_dict(torch.load(model_path))\n break\n else:\n print(\"Please input correctly. 
Try Again.\")\n\n_ = model.half()\n_ = model.cuda()\n\nfor m in model.modules():\n if isinstance(m, whisper.model.LayerNorm):\n m.float()\n\nmodel.eval()\nwith torch.inference_mode():\n result = model.transcribe(\n sys.argv[1],\n fp16=True,\n without_timestamps=True\n )\nprint(result[\"text\"])\n","repo_name":"Shiiya0418/Whisper-Sample-for-Jetson","sub_path":"speechrecog.py","file_name":"speechrecog.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25850098777","text":"import time\nfrom turtle import Screen, Turtle\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\ntim = Player()\ncar_mgr = CarManager()\nscore = Scoreboard()\n\nscreen.listen()\nscreen.onkey(tim.go_up, \"Up\")\n\ngame_is_on = True\nwhile game_is_on:\n time.sleep(0.1)\n screen.update()\n\n car_mgr.create_car()\n car_mgr.move_cars()\n\n # Detect car collision\n for car in car_mgr.all_cars:\n if car.distance(tim) < 20:\n game_is_on = False\n score.game_over()\n\n if tim.is_at_finish_line():\n tim.go_to_start()\n car_mgr.level_up()\n score.increase_level()\n\n\n\n\nscreen.exitonclick()","repo_name":"svetzh/Udemy_Courses","sub_path":"22_Crossing_turtle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10597540538","text":"import os # needed for the os.path calls below; this import was missing in the original\nfrom lib.carillon import Carillon, CarillonStriker, Song\nfrom lib.direktorium import TodayDirektorium, Rank, Season\n\n_CustomStriker__sdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'songs')\n\n\nclass CustomStriker(CarillonStriker):\n\n TRINITATIS = 0x22 # A1SHARP\n MARIA = 0x25 # C2SHARP\n JOSEF = 0x27 # D2SHARP\n APOSTEL = 0x2A # F2SHARP\n BERNHARD = 0x2C # G2SHARP\n ENGEL = 0x2E # A2SHARP\n\n SONG_LOURDES = Song(os.path.join(_CustomStriker__sdir, 'Lourdes Lied.mid'))\n SONG_MARIANIC = {\n Season.ORDINARY:\n Song(os.path.join(_CustomStriker__sdir, 'Salve Regina.mid')),\n Season.CHRISTMAS:\n Song(os.path.join(_CustomStriker__sdir, 'Alma Redemptoris Mater.mid')),\n Season.LENT:\n Song(os.path.join(_CustomStriker__sdir, 'Ave Regina caelorum.mid')),\n Season.EASTER:\n Song(os.path.join(_CustomStriker__sdir, 'Regina caeli laetare.mid')),\n }\n","repo_name":"MatthiasGi/Glockenturm","sub_path":"software/customstriker.py","file_name":"customstriker.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43547993537","text":"from .. 
import models, schemas, oauth2\nfrom fastapi import Response, status, HTTPException, Depends, APIRouter\nfrom ..database import get_db\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import func\nfrom typing import Optional, List\n\nrouter = APIRouter(prefix=\"/posts\", tags=[\"Posts\"])\n\n# this is how it is done with regular SQL\n# @app.get(\"/posts\")\n# def get_posts():\n# cursor.execute(\"SELECT * FROM posts \")\n# posts = cursor.fetchall()\n# return {\"data\": posts}\n\n\n@router.get(\"/\", response_model=List[schemas.PostOut])\n# @router.get(\"/\")\ndef get_posts(db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user), limit: int = 10, skip: int = 0, search: Optional[str] = \"\"):\n # posts = db.query(models.Post).filter(models.Post.title.contains(search)).limit(limit).offset(skip).all()\n posts = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(models.Vote, models.Vote.post_id == models.Post.id,\n isouter=True).group_by(models.Post.id).filter(models.Post.title.contains(search)).limit(limit).offset(skip).all()\n return posts\n\n# just for testing purposes as first step, when working without databases\n# @app.post(\"/posts\", status_code=status.HTTP_201_CREATED)\n# def create_posts(post: Post, response: Response):\n# post_dict = post.dict()\n# post_dict['id'] = randrange(1,1000000)\n# my_posts.append(post_dict)\n# return {\"data\": post_dict}\n\n# this is how it is done with regular SQL\n# @app.post(\"/posts\", status_code=status.HTTP_201_CREATED)\n# def create_posts(post: Post):\n# cursor.execute(\"INSERT INTO posts (title, content, published) VALUES (%s, %s, %s) RETURNING *\",(post.title, post.content, post.published))\n# # Never do this, this will support SQL Injection. The above statement sanitizes that\n# # cursor.execute(f\"INSERT INTO posts (title, content, published) VALUES ({post.title},{post.content},{post.published})\")\n# conn.commit()\n# new_post = cursor.fetchone()\n# return {\"data\": new_post}\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED, response_model=schemas.Post)\ndef create_posts(post: schemas.PostCreate, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):\n # takes post and converts it to a dictionary and unpack it with **\n #print(current_user.email)\n new_post = models.Post(owner_id=current_user.id, **post.dict())\n db.add(new_post)\n db.commit()\n # retrieve created post\n db.refresh(new_post)\n return new_post\n\n# just for testing purposes as first step, when working without databases\n# @app.get(\"/posts/{id}\")\n# def get_post(id: int, response: Response):\n# post = find_post(id)\n# # print(post)\n# if not post:\n# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n# # response.status_code = status.HTTP_404_NOT_FOUND\n# # return {'message': f\"post with id: {id} was not found\"}\n# return {\"post_detail\": post}\n\n# this is how it is done with regular SQL\n# @app.get(\"/posts/{id}\")\n# def get_post(id: int):\n# # cursor.execute(\"SELECT * FROM posts WHERE id = %s\",(id))\n# cursor.execute(\"SELECT * FROM posts WHERE id = %s\",(str(id),))\n# post = cursor.fetchone()\n# if not post:\n# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} was not found\")\n# # response.status_code = status.HTTP_404_NOT_FOUND\n# # return {'message': f\"post with id: {id} was not found\"}\n# return {\"post_detail\": post}\n\n\n@router.get(\"/{id}\", 
response_model=schemas.PostOut)\ndef get_post(id: int, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):\n # post = db.query(models.Post).filter(models.Post.id == id).first()\n post = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(models.Vote, models.Vote.post_id == models.Post.id,\n isouter=True).group_by(models.Post.id).filter(models.Post.id == id).first()\n print(post)\n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"post with id: {id} was not found\")\n # response.status_code = status.HTTP_404_NOT_FOUND\n # return {'message': f\"post with id: {id} was not found\"}\n return post\n\n # post = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(\n # models.Vote, models.Vote.post_id == models.Post.id, isouter=True).group_by(models.Post.id).filter(models.Post.id == id).first()\n\n # if not post:\n # raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n # detail=f\"post with id: {id} was not found\")\n\n # return post\n\n# just for testing purposes as first step, when working without databases\n# @app.delete(\"/posts/{id}\", status_code=status.HTTP_204_NO_CONTENT)\n# def delete_post(id: int):\n# # find the index in the array that has required ID\n# index = find_index_post(id)\n# # print(index)\n# if index == None:\n# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} doesn't exist\")\n# my_posts.pop(index)\n# return Response(status_code=status.HTTP_204_NO_CONTENT)\n\n# this is how it is done with regular SQL\n# @app.delete(\"/posts/{id}\", status_code=status.HTTP_204_NO_CONTENT)\n# def delete_post(id: int):\n# cursor.execute(\"DELETE FROM posts WHERE id = %s RETURNING *\",(str(id),))\n# # index = find_index_post(id)\n# conn.commit()\n# deleted_post = cursor.fetchone()\n# if deleted_post == None:\n# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} doesn't exist\")\n# return Response(status_code=status.HTTP_204_NO_CONTENT)\n\n\n@router.delete(\"/{id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef delete_post(id: int, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):\n # define query\n post_query = db.query(models.Post).filter(models.Post.id == id)\n # get the post\n post = post_query.first()\n\n # Checks if the post which he wants to delete exists\n if post == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"post with id: {id} doesn't exist\")\n # checks if user wants to delete his own post, only this is permited\n if post.owner_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail='Not authorized to perform requested action')\n # delete the post\n post_query.delete(synchronize_session=False)\n db.commit()\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n\n# just for testing purposes as first step, when working without databases\n# @app.put(\"/posts/{id}\")\n# def update_post(id: int, post: Post):\n# print(post)\n# index = find_index_post(id)\n# # print(index)\n# if index == None:\n# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} doesn't exist\")\n# post_dict = post.dict()\n# post_dict['id'] = id\n# my_posts[index] = post_dict\n# return {'data': post_dict}\n\n# this is how it is done with regular SQL\n# @app.put(\"/posts/{id}\")\n# def update_post(id: int, post: Post):\n# cursor.execute(\"UPDATE posts SET title = %s, content = %s, 
published = %s WHERE id = %s RETURNING *\", (post.title, post.content, post.published, str(id)))\n# conn.commit()\n# updated_post = cursor.fetchone()\n# if updated_post == None:\n# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"post with id: {id} doesn't exist\")\n# return {\"data\": updated_post}\n\n\n@router.put(\"/{id}\", response_model=schemas.Post)\ndef update_post(id: int, updated_post: schemas.PostCreate, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):\n # query to find post with specific id\n post_query = db.query(models.Post).filter(models.Post.id == id)\n # grab that specific post\n post = post_query.first()\n # if it doesn't exist run a 404\n if post == None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"post with id: {id} doesn't exist\")\n # if the post is not a post from the logged in user throw an error\n if post.owner_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail='Not authorized to perform requested action')\n # if it exists update it\n post_query.update(updated_post.dict(), synchronize_session=False)\n # commit this changes\n db.commit()\n # return the post\n return post_query.first()\n","repo_name":"pastatopf/python-api-development","sub_path":"app/routers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9597506226","text":"__author__ = 'Daniel Musgrave '\n__version__ = '1.0'\n__date__ = 'June 12th, 2007'\n\nimport textwrap\n\nfrom deploy.util import rxml\n\nfrom deploy.util.difftest import expand, NoneEntry, NewEntry\nfrom deploy.util.difftest.handlers import DiffHandler\n\nNEW = '-'\nNONE = ''\n\nclass VariablesHandler(DiffHandler):\n def __init__(self, data, obj):\n self.name = 'variables'\n\n self.vdata = data\n self.obj = obj\n self.vars = {}\n\n DiffHandler.__init__(self)\n\n expand(self.vdata)\n\n def clear(self):\n self.vars.clear()\n\n def mdread(self, metadata, *args, **kwargs):\n for node in metadata.xpath('/metadata/variables/value', []):\n item = node.getxpath('@variable')\n if len(node.getchildren()) == 0:\n self.vars[item] = NoneEntry(item)\n else:\n self.vars[item] = rxml.serialize.unserialize(node[0])\n\n def mdwrite(self, root, *args, **kwargs):\n vars = rxml.config.Element('variables', parent=root)\n for var in set(self.vdata):\n parent = rxml.config.Element('value', parent=vars, attrib={'variable': var})\n val = eval('self.obj.%s' % var)\n parent.append(rxml.serialize.serialize(val))\n\n def diff(self):\n self.diffdict = VariablesDiffDict()\n for var in set(self.vdata):\n try:\n val = eval('self.obj.%s' % var)\n except AttributeError:\n val = NoneEntry(var)\n if self.vars.has_key(var):\n if self.vars[var] != val:\n self.diffdict[var] = (self.vars[var], val)\n else:\n self.diffdict[var] = (NewEntry(), val)\n\n for old_var in self.vars:\n if old_var not in self.vdata:\n # A variable is not being tracked anymore. 
Definitely a\n # change worth noting.\n self.diffdict[old_var] = (self.vars[old_var], NoneEntry(old_var))\n if self.diffdict: self.dprint('variables: %s' % self.diffdict)\n return self.diffdict\n\n\nclass VariablesDiffDict(dict):\n width = 35 # max width of var returns (should be less than half term width)\n\n def __str__(self): return self.__repr__()\n def __repr__(self):\n s = ''\n\n for key, vartup in self.items():\n metadata, memory = vartup\n\n s += key + '\\n'\n s += ' %-35.35s %-35.35s\\n' % ('Metadata', 'Memory')\n\n if metadata:\n if isinstance(metadata, NewEntry):\n c1 = NEW\n else:\n c1 = repr(metadata)\n else:\n c1 = NONE\n\n if memory:\n if isinstance(memory, NewEntry):\n c2 = NEW\n else:\n c2 = repr(memory)\n else:\n c2 = NONE\n\n s += '\\n'.join(_wrap_lines(c1, c2, self.width)) + '\\n'\n\n return s\n\ndef _wrap_lines(s1, s2, width):\n l1 = textwrap.wrap(s1, width-1)\n l2 = textwrap.wrap(s2, width-1)\n\n if len(l1) > len(l2):\n for i in range(len(l2), len(l1)):\n l2.append(' '*width)\n elif len(l1) < len(l2):\n for i in range(len(l1), len(l2)):\n l1.append(' '*width)\n\n lf = []\n for i in range(0, len(l1)):\n lf.append(' %-35.35s %-35.35s' % (l1[i], l2[i]))\n return lf\n","repo_name":"kaywilliams/rosie","sub_path":"deploy/util/difftest/handlers/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25056860810","text":"def fib(n):\n if n==0:\n return 0\n elif n==1:\n return 1\n else:\n return fib(n-1)+fib(n-2)\n\ndef factorial(n):\n if n==0:\n return 1\n return n*factorial(n-1)\n\nif __name__ == '__main__':\n print(fib(6))\n print(factorial(5))","repo_name":"Daniel107x-hub/Algorithmic-Toolbox","sub_path":"Divid-And-Conquer/Python/Res/recursividad.py","file_name":"recursividad.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21585805862","text":"import random\n#Indivitual Class\nclass Individual:\n geneLength = 0\n fitness = 0\n word = None\n def __init__(self,geneLength,w):\n self.geneLength = geneLength\n self.gene = []\n self.word = w\n for i in range(self.geneLength):\n self.gene.append(chr(random.randint(65,90)))\n\n def calculateFitness(self):\n self.fitness = 0\n for i in range(len(self.gene)):\n self.fitness += abs(ord(self.gene[i]) - ord(self.word[i]))\n return self.fitness\n\n def showGene(self):\n print(self.gene,'=',self.fitness)\n\n#Population Class\nclass Population:\n populationSize = 0\n indivituals = []\n fitness = []\n fittest = None\n fittestIndivitualIndex = None\n totalFItness = 0\n finalWord = None\n geneLen = 0\n foundValue = 0\n\n def __init__(self,populationSize,geneLen,word):\n self.populationSize = populationSize\n self.geneLen = geneLen\n self.finalWord = word.upper()\n self.initializePopulation()\n\n def initializePopulation(self):\n for i in range(self.populationSize):\n self.indivituals.append(Individual(self.geneLen,self.finalWord))\n\n def calculateFitnessOfIndiviuals(self):\n self.totalFItness = 0\n for i in range(self.populationSize):\n self.totalFItness += self.indivituals[i].calculateFitness()\n\n def sortIndiviual(self):\n for i in range(len(self.indivituals)):\n for j in range(0,len(self.indivituals)-i-1):\n if self.indivituals[j].fitness > self.indivituals[j+1].fitness:\n self.indivituals[j], self.indivituals[j+1] = self.indivituals[j+1], self.indivituals[j]\n\n def copyIndivitual(self,ind):\n n = 
Individual(self.geneLen,self.finalWord)\n n.gene.clear()\n for i in ind.gene:\n n.gene.append(i)\n return n\n\n def getFittestIndivitual(self):\n self.sortIndiviual()\n self.fittestIndivitualIndex = 0\n self.fittest = self.indivituals[0].fitness\n return self.copyIndivitual(self.indivituals[0])\n\n def getSecondFittestIndivitual(self):\n return self.copyIndivitual(self.indivituals[1])\n\n def getLeastIndivitual(self):\n return self.populationSize - 1\n\n def printPopulation(self):\n self.sortIndiviual()\n for i in self.indivituals:\n i.showGene()\n\n def found(self):\n for w in self.indivituals:\n if w.fitness is self.foundValue:\n return True\n return False\n\n\n\ndef crossOver(fittest,secondFittest):\n randomCrossOverPoint = random.randint(0,fittest.geneLength-1)\n for i in range(randomCrossOverPoint,fittest.geneLength):\n fittest.gene[i],secondFittest.gene[i] = secondFittest.gene[i], fittest.gene[i]\n\n\n\ndef mutation(fittest, secondFittest):\n randomCrossOverPoint = random.randint(0, fittest.geneLength-1)\n if fittest.gene[randomCrossOverPoint] is not fittest.word[randomCrossOverPoint]:\n fittest.gene[randomCrossOverPoint] = fittest.word[random.randint(0,fittest.geneLength-1)]\n if secondFittest.gene[randomCrossOverPoint] is not secondFittest.word[randomCrossOverPoint]:\n secondFittest.gene[randomCrossOverPoint] = secondFittest.word[random.randint(0,fittest.geneLength-1)]\n\n\ndef offSpring(population,fittest,secondFittest):\n population.indivituals[population.getLeastIndivitual()].gene.clear()\n if fittest.calculateFitness() <= secondFittest.calculateFitness():\n for i in fittest.gene:\n population.indivituals[population.getLeastIndivitual()].gene.append(i)\n else:\n for i in secondFittest.gene:\n population.indivituals[population.getLeastIndivitual()].gene.append(i)\n\n# -- Main --\npopSize = 10\nword = 'Hello World'\ngeneLen = len(word)\npopulation = Population(popSize,geneLen,word)\npopulation.calculateFitnessOfIndiviuals()\ngenerationCount = 1\npopulation.printPopulation()\nprint('Generation:',generationCount)\nprint('-----------------------')\nwhile population.found() is False:\n\n fittest = population.getFittestIndivitual()\n secondFittest = population.getSecondFittestIndivitual()\n crossOver(fittest, secondFittest)\n mutation(fittest,secondFittest)\n offSpring(population,fittest,secondFittest)\n generationCount += 1\n population.calculateFitnessOfIndiviuals()\n population.printPopulation()\n print('Generation:',generationCount)\n print('-----------------------')\n","repo_name":"kakanghosh/Artificial-Intelligence","sub_path":"GeneticAlgo/Word.py","file_name":"Word.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13823275551","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/31 17:46\n# @Author : userzhang\n\nimport datetime\n\nimport time\nfrom celery.task import Task\nimport django\ndjango.setup()\n\n\nfrom http.client import HTTPConnection\nimport json\n\nclass EmailManage:\n '''发送激活链接类'''\n @staticmethod\n def sendMail(toMail, sub, content, cc=\"\", html=\"False\"):\n toMail = type(toMail) != list and toMail or \",\".join(toMail)\n data = {\"system_name\": \"Edge Connect\", \"toMail\": toMail, \"content\": content, \"mail_title\": sub,\n \"cc\": cc, \"html_status\": html, \"errors_type\": \"email\"}\n try:\n body = json.dumps(data)\n headers = {\"Content-Type\": \"text/html; charset=utf-8\"}\n conn = HTTPConnection(\"10.129.4.95:9090\")\n 
conn.request(method=\"POST\", url=\"/data_manage/\", body=body, headers=headers)\n res = conn.getresponse().read().decode(encoding=\"utf-8\") #bug1: 老是卡顿在这里\n res=json.loads(res)\n if res[\"status\"] == \"True\":\n print(\"邮件发送成功\")\n else:\n print(\"邮件发送失败\")\n\n except Exception as e:\n print(\"邮件发送失败-\", repr(e))\n\n\nclass Send_register_email_tasks(Task):\n name = \"Send_register_email_tasks\"\n\n def run(self, receiver, subject, html_message):\n '''定义任务函数 注册用户时发送Email'''\n EmailManage.sendMail(receiver, subject, html_message)\n return True\n\n\n","repo_name":"zhouzhousc/EB","sub_path":"apps/user/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71126578292","text":"#! /usr/bin/env python3\n\n\n# Note import path which is different to molecule code\n#from pyscf.pbc import gto, scf, df, dft\nfrom pyscf import gto, scf, df, dft\nimport numpy\n\n\ncell = gto.M(\n atom ='''O 0.0 0.0 0.0''',\n basis ='cc-pcvdz',\n unit ='A',\n spin=2,\n verbose = 5,\n cart=False,\n)\n\n\n\nmf = scf.UHF(cell)\nmf.kernel()\n\ntitle='O-UHF-Triplet'\n\n\nfrom PyscfToQmcpack import savetoqmcpack\nsavetoqmcpack(cell,mf,title)\n","repo_name":"QMCPACK/qmcpack","sub_path":"tests/molecules/O_ae_pyscf_UHF/pyscf/O.py","file_name":"O.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"21"} +{"seq_id":"71399030132","text":"from aiogram import Dispatcher, types\nfrom aiogram.dispatcher.filters import Text\nimport aiogram.utils.markdown as fmt\nfrom app.utils import questions_ru, available_options_ru\nfrom app.db import get_database\ncollection_name = get_database()\n\nasync def ru_start(message: types.Message):\n chat_id = collection_name.find({\"chat_id\" : message.from_id})\n await message.answer(\"Для доступа к материалам курса, отправьте пожалуйста ваше имя:\")\n\n\nasync def seller_call_center_info(message: types.Message):\n await message.answer(\"Контакты колл центра АО «ҚазАзот»:\\n+7 777 420 1081\\n+7 771 949 9119\")\n return\n\n\nasync def get_video(message: types.Message):\n await message.answer(f\"{fmt.hide_link('https://www.youtube.com/watch?v=ag-7xYiip4I')}Как оплатить заказ?\",\n parse_mode=types.ParseMode.HTML)\n await message.answer(f\"{fmt.hide_link('https://www.youtube.com/watch?v=VUtrV6N3AS8')}Как подписать накладную?\",\n parse_mode=types.ParseMode.HTML)\n return\n\n\nasync def qmarket_call_center_info(message: types.Message):\n await message.answer(\"Контакты колл центра QMARKET:\\n+7 777 448 36 36\")\n return\n\n\nasync def faq_questions(message: types.Message):\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n keyboard.add(\"⬅ ️Вернуться на главную страницу\")\n for name in list(questions_ru.keys()):\n keyboard.add(name)\n await message.answer(\"Выберите вопрос:\", reply_markup=keyboard)\n\n\ndef register_handlers_ru(dp: Dispatcher):\n dp.register_message_handler(ru_start, commands=\"ru\")\n dp.register_message_handler(seller_call_center_info, Text(equals=\"📞 Узнать колл центр продавцов\"))\n dp.register_message_handler(faq_questions, Text(equals=\"❓ Ответы на часто задаваемые вопросы\"))\n dp.register_message_handler(get_video, Text(equals=\"▶️ Посмотреть обучающие видео\"))\n dp.register_message_handler(qmarket_call_center_info, Text(equals=\"📞 Узнать колл центр 
Qmarketa\"))","repo_name":"qontrastss/helper_bot","sub_path":"app/handlers/ru.py","file_name":"ru.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39477805553","text":"import os\r\nimport click\r\nimport json\r\nimport numpy as np\r\nimport cv2\r\nimport uuid\r\nfrom collections import deque\r\nimport glob\r\nimport sys\r\n\r\n\r\ndef resize_frame(img, new_size):\r\n old_w, old_h, _ = img.shape\r\n l = float(min(new_size[0], new_size[1]) / max(old_w, old_h))\r\n new_frame = cv2.resize(img, None, fx=l, fy=l, interpolation=cv2.INTER_LINEAR)\r\n w, h, _ = new_frame.shape\r\n delta_w = new_size[0] - w\r\n delta_h = new_size[1] - h\r\n\r\n color = [0, 0, 0]\r\n\r\n resized_frame = cv2.copyMakeBorder(new_frame, 0, delta_w, 0, delta_h, cv2.BORDER_CONSTANT, value=color)\r\n return resized_frame\r\n\r\n\r\n@click.command()\r\n@click.option('--size', nargs=2, type=int, help='Size of frames.')\r\n@click.option('--src', type=str, help='Directory with annotated vids.')\r\n@click.option('--dst', type=str, help='Folder to store sequences.')\r\ndef transform(src:str, size, dst: str):\r\n wc = '*.mp4'\r\n for video in glob.iglob(os.path.join(src, '**', wc), recursive=True):\r\n action_idx = {\r\n 'empty': 0,\r\n 'left': 1,\r\n 'right': 2,\r\n 'far': 3,\r\n 'close': 4,\r\n 'up': 5,\r\n 'down': 6,\r\n 'smile': 7,\r\n 'eyes': 8\r\n }\r\n\r\n def _save_npy_and_ann(seq, seq_filename, annotation):\r\n np.save(seq_filename, np.array(seq))\r\n\r\n with open(seq_filename + '.npy.annotations', 'w') as fan:\r\n json.dump(annotation, fan)\r\n\r\n return seq\r\n\r\n if not os.path.exists(video):\r\n click.secho('--> Video file not found!', fg='red')\r\n return -1\r\n\r\n if not os.path.exists(video + '.annotations'):\r\n click.secho('--> Annotation file not found!', fg='red')\r\n return -1\r\n\r\n reader = None\r\n try:\r\n reader = cv2.VideoCapture(video)\r\n except:\r\n click.secho('--> Failed to initialize video source {}'.format(video), fg='red')\r\n return -1\r\n\r\n filename = os.path.splitext(os.path.basename(video))[0]\r\n\r\n raw_folder = None\r\n\r\n raw_folder = os.path.join(dst, filename+'_'+str(size[0]))\r\n if not os.path.exists(raw_folder):\r\n os.makedirs(raw_folder)\r\n else:\r\n continue\r\n\r\n fb_folder = None\r\n if fb:\r\n fb_folder = os.path.join(dst, filename+'_'+str(length)+'_'+str(size[0])+'_'+'fb')\r\n if not os.path.exists(fb_folder):\r\n os.makedirs(fb_folder)\r\n\r\n frame_cnt = 0\r\n seq_raw = deque(list())\r\n seq_fb = deque(list())\r\n frame_idx = deque(list())\r\n\r\n data = None\r\n with open(video+'.annotations', 'r') as f:\r\n data = json.load(f)\r\n\r\n prev_frame = None\r\n total = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\r\n\r\n while True:\r\n # frame = reader.next_frame()\r\n _, frame = reader.read()\r\n if frame is None:\r\n break\r\n if raw:\r\n seq_filename_raw = str(uuid.uuid4().hex)\r\n seq_filename_raw = os.path.join(raw_folder, seq_filename_raw)\r\n if fb:\r\n seq_filename_fb = str(uuid.uuid4().hex)\r\n seq_filename_fb = os.path.join(fb_folder, seq_filename_fb)\r\n\r\n print(len(seq_raw))\r\n\r\n if len(seq_raw) == length:\r\n cur_action = None\r\n prev_action = None\r\n action_cnt = 0\r\n conf = 0.0\r\n\r\n for idx in frame_idx:\r\n cur_action = get_action_from_annotations(idx, data)\r\n\r\n if cur_action is not None:\r\n if cur_action == prev_action or prev_action is None:\r\n action_cnt += 1\r\n prev_action = cur_action\r\n else:\r\n conf = float(action_cnt / 
length)\r\n cur_action = prev_action\r\n\r\n if action_cnt != 0:\r\n conf = float(action_cnt / length)\r\n\r\n if conf >= high or conf <= low:\r\n ann = dict()\r\n if conf <= low:\r\n ann['class'] = action_idx['empty']\r\n else:\r\n ann['class'] = action_idx[cur_action]\r\n ann['conf'] = float('{:.2}'.format(conf))\r\n\r\n print(ann['class'], ': ', ann['conf'])\r\n\r\n if raw:\r\n seq_raw = _save_npy_and_ann(seq_raw, seq_filename_raw, ann)\r\n if fb:\r\n seq_fb = _save_npy_and_ann(seq_fb, seq_filename_fb, ann)\r\n else:\r\n seq_raw.popleft() if raw else None\r\n seq_fb.popleft() if fb else None\r\n\r\n frame_idx.popleft()\r\n\r\n w, h, _ = frame.shape\r\n\r\n if raw:\r\n frame_raw_resized = resize_frame(frame, size)\r\n seq_raw.append(frame_raw_resized)\r\n if fb:\r\n if prev_frame is None: prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n frame_fb = cv2.calcOpticalFlowFarneback(prev_frame, frame_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)\r\n prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n frame_fb = np.concatenate((frame_fb, np.zeros((w, h, 1))), axis=2)\r\n frame_fb_resized = resize_frame(frame_fb, size)\r\n seq_fb.append(frame_fb_resized)\r\n\r\n print(\"\\033[H\\033[J\")\r\n per = round(float(frame_cnt/total), 2)\r\n print(video)\r\n print(\"Progress: {0}%\".format(per*100))\r\n\r\n frame_idx.append(frame_cnt)\r\n frame_cnt += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n transform()\r\n","repo_name":"rybach/parsers","sub_path":"video_to_one_sequence.py","file_name":"video_to_one_sequence.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17516941370","text":"import sys\nsys.stdin = open('input.txt')\n\n# 목표 : 전체 제품의 최소 생산 비용\n\ndef dfs(s):\n global cost, rlt\n\n if rlt < cost:\n return\n if s == N:\n if rlt > cost:\n rlt = cost\n return\n\n for i in range(N):\n if visited[i] == 0:\n visited[i] = 1\n cost += arr[s][i]\n # print(f's: {s}, i: {i}, cost: {cost}')\n dfs(s+1)\n visited[i] = 0\n cost -= arr[s][i]\n\n\n\n\n\n\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n N = int(input())\n arr = [list(map(int,input().split())) for _ in range(N)]\n visited = [0] * N\n rlt = 99999\n cost = 0\n dfs(0)\n print(f'#{tc} {rlt}')","repo_name":"Sangtaek-Lee/Algorithm","sub_path":"problem/0331/5209_최소생산비용/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33763285311","text":"from rxn.chemutils.rdf.reaction_properties import (\n ReactionProperties,\n find_compounds,\n find_compounds_with_category,\n)\n\n\ndef test_instantiate_reaction_properties() -> None:\n # Basic example with a reaction containing a mix of values and list properties\n meta = {\n \"RXN:TEST_FILE\": \"1\",\n \"RXN:TEST_RXN_CONDITIONS\": \"2\",\n \"RXN:TEST_RXN_SEQUENCE_NUM\": \"3\",\n \"RXN:TEST_TABLEROW_NUM\": \"4\",\n \"RXN:SOME_LIST_PROPERTY(1):BROAD\": \"5\",\n \"RXN:SOME_LIST_PROPERTY(1):MEDIUM\": \"6\",\n \"RXN:SOME_LIST_PROPERTY(1):NARROW\": \"7\",\n \"RXN:SOME_LIST_PROPERTY(2):BROAD\": \"8\",\n \"RXN:SOME_LIST_PROPERTY(2):MEDIUM\": \"9\",\n \"RXN:SOME_LIST_PROPERTY(2):NARROW\": \"10\",\n \"RXN:PRODUCT(1):YIELD_%\": \"11\",\n \"RXN:PRODUCT(2):YIELD_%\": \"12\",\n \"RXN:PRODUCT(4):YIELD_%\": \"13\",\n }\n properties = ReactionProperties(meta)\n assert properties.properties == {\n \"TEST_FILE\": \"1\",\n 
\"TEST_RXN_CONDITIONS\": \"2\",\n \"TEST_RXN_SEQUENCE_NUM\": \"3\",\n \"TEST_TABLEROW_NUM\": \"4\",\n \"SOME_LIST_PROPERTY\": [\n {\n \"BROAD\": \"5\",\n \"MEDIUM\": \"6\",\n \"NARROW\": \"7\",\n },\n {\n \"BROAD\": \"8\",\n \"MEDIUM\": \"9\",\n \"NARROW\": \"10\",\n },\n ],\n \"PRODUCT\": [\n {\"YIELD_%\": \"11\"},\n {\"YIELD_%\": \"12\"},\n {},\n {\"YIELD_%\": \"13\"},\n ],\n }\n\n\ndef test_generic_properties() -> None:\n # Example containing some properties from the generic reaction as well\n meta = {\n \"RXN:P1\": \"1\",\n \"RXN:SOME_P\": \"2\",\n \"RXN:SEQ_P\": \"3\",\n \"RXN:ROW\": \"4\",\n \"RXN:GENERIC:DF\": \"5\",\n \"RXN:GENERIC:DUMMY\": \"6\",\n }\n properties = ReactionProperties(meta)\n assert properties.properties == {\n \"P1\": \"1\",\n \"SOME_P\": \"2\",\n \"SEQ_P\": \"3\",\n \"ROW\": \"4\",\n \"GENERIC\": {\n \"DF\": \"5\",\n \"DUMMY\": \"6\",\n },\n }\n\n\ndef test_prunes_mol_and_symbol() -> None:\n # Example removing \"MOL(1):\" and \"SYMBOL(1):\"\n meta = {\n \"RXN:SOLVENT(1):MOL(1):MOLSTRUCTURE\": \"1\",\n \"RXN:SOLVENT(1):MOL(1):SYMBOL(1):SYMBOL\": \"2\",\n \"RXN:SOLVENT(2):MOL(1):MOLSTRUCTURE\": \"3\",\n \"RXN:SOLVENT(2):MOL(1):SYMBOL(1):SYMBOL\": \"4\",\n \"RXN:GENERIC:SOLVENT(1):MOL(1):MOLSTRUCTURE\": \"5\",\n \"RXN:GENERIC:SOLVENT(1):MOL(1):SYMBOL(1):SYMBOL\": \"6\",\n }\n properties = ReactionProperties(meta)\n assert properties.properties == {\n \"SOLVENT\": [\n {\n \"MOLSTRUCTURE\": \"1\",\n \"SYMBOL\": \"2\",\n },\n {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n },\n ],\n \"GENERIC\": {\n \"SOLVENT\": [\n {\n \"MOLSTRUCTURE\": \"5\",\n \"SYMBOL\": \"6\",\n },\n ],\n },\n }\n\n\ndef test_find_compounds_simple() -> None:\n property_dict = {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n }\n\n assert list(find_compounds(property_dict)) == [\n {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n },\n ]\n\n\ndef test_find_compounds_nested() -> None:\n property_dict = {\n \"SOLVENT\": [\n {\n \"MOLSTRUCTURE\": \"1\",\n \"SYMBOL\": \"2\",\n },\n {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n },\n ],\n \"GENERIC\": {\n \"CATALYST\": [\n {\n \"MOLSTRUCTURE\": \"5\",\n \"SYMBOL\": \"6\",\n },\n ],\n },\n }\n\n assert list(find_compounds(property_dict)) == [\n {\n \"MOLSTRUCTURE\": \"1\",\n \"SYMBOL\": \"2\",\n },\n {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n },\n {\n \"MOLSTRUCTURE\": \"5\",\n \"SYMBOL\": \"6\",\n },\n ]\n\n\ndef test_find_compounds_with_category() -> None:\n property_dict = {\n \"SOLVENT\": [\n {\n \"MOLSTRUCTURE\": \"1\",\n \"SYMBOL\": \"2\",\n },\n {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n },\n ],\n \"GENERIC\": {\n \"CATALYST\": [\n {\n \"MOLSTRUCTURE\": \"5\",\n \"SYMBOL\": \"6\",\n },\n ],\n },\n \"NO_CATEGORY\": { # This one is not in a list -> empty category.\n \"MOLSTRUCTURE\": \"8\",\n \"SYMBOL\": \"9\",\n },\n }\n\n assert list(find_compounds_with_category(property_dict)) == [\n (\n {\n \"MOLSTRUCTURE\": \"1\",\n \"SYMBOL\": \"2\",\n },\n \"SOLVENT\",\n ),\n (\n {\n \"MOLSTRUCTURE\": \"3\",\n \"SYMBOL\": \"4\",\n },\n \"SOLVENT\",\n ),\n (\n {\n \"MOLSTRUCTURE\": \"5\",\n \"SYMBOL\": \"6\",\n },\n \"CATALYST\",\n ),\n (\n {\n \"MOLSTRUCTURE\": \"8\",\n \"SYMBOL\": \"9\",\n },\n \"\",\n ),\n ]\n","repo_name":"rxn4chemistry/rxn-chemutils","sub_path":"tests/rdf/test_reaction_properties.py","file_name":"test_reaction_properties.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"42904053262","text":"# -*- coding: 
utf-8 -*-\n\"\"\"\nCreated on Thu Aug 19 18:35:54 2021\nhttps://nbviewer.jupyter.org/url/www.cs.toronto.edu/~rgrosse/courses/csc421_2019/tutorials/tut2/autograd_tutorial.ipynb\n@author: eeltink\n\"\"\"\nimport matplotlib.pyplot as plt\nimport autograd\nimport autograd.numpy as np\nimport autograd.numpy.random as npr\nfrom autograd import grad\nfrom autograd.misc import flatten # flatten_func\n\nfrom autograd.misc.optimizers import sgd\n\n# def sgd(grad, init_params, callback=None, num_iters=200, step_size=0.1, mass=0.9):\n# \"\"\"Stochastic gradient descent with momentum.\n# grad() must have signature grad(x, i), where i is the iteration number.\"\"\"\n# flattened_grad, unflatten, x = flatten_func(grad, init_params)\n\n# velocity = np.zeros(len(x))\n# for i in range(num_iters):\n# g = flattened_grad(x, i)\n# if callback:\n# callback(unflatten(x), i, unflatten(g))\n# velocity = mass * velocity - (1.0 - mass) * g\n# x = x + step_size * velocity\n# return unflatten(x)\n\n#%% Generate synthetic data\nx = np.linspace(-5, 5, 100)\nt = x ** 3 - 20 * x + 10 + npr.normal(0, 4, x.shape[0])\nplt.figure()\nplt.plot(x, t, 'r.')\n\n#%% NN\ninputs = x.reshape(x.shape[-1],1)\nW1 = npr.randn(1,4)\nb1 = npr.randn(4)\nW2 = npr.randn(4,4)\nb2 = npr.randn(4)\nW3 = npr.randn(4,1)\nb3 = npr.randn(1)\n\nparams = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3 }\n\ndef relu(x):\n return np.maximum(0, x)\n\nnonlinearity = np.tanh\n#nonlinearity = relu\n\ndef predict(params, inputs):\n h1 = nonlinearity(np.dot(inputs, params['W1']) + params['b1'])\n h2 = nonlinearity(np.dot(h1, params['W2']) + params['b2'])\n output = np.dot(h2, params['W3']) + params['b3']\n # output = nonlinearity(np.dot(h2, params['W3']) + params['b3'])\n return output\n\ndef loss(params, i):\n output = predict(params, inputs)\n return (1.0 / inputs.shape[0]) * np.sum(0.5 * np.square(output.reshape(output.shape[0]) - t))\n\nprint(loss(params, 0))\n\noptimized_params = sgd(grad(loss), params, step_size=0.01, num_iters=5000)\nprint(optimized_params)\nprint(loss(optimized_params, 0))\n\nfinal_y = predict(optimized_params, inputs)\nplt.figure()\nplt.plot(x, t, 'r.')\nplt.plot(x, final_y, 'b-')\n\n#%%","repo_name":"DebbieCodes/PredatorPreyBackProp","sub_path":"AutogradExamples/AutogradExampleNW2.py","file_name":"AutogradExampleNW2.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11315579633","text":"import joblib\nfrom torch.utils.data import dataloader, DataLoader, random_split\nfrom torchvision.datasets.celeba import CelebA\nfrom torchvision import transforms\nimport lmdb\nfrom torch.utils.data import Dataset\nfrom io import BytesIO\nfrom PIL import Image\n\ncelebA_data_path = \"/data/kaggle/shared/Data/\"\nffhq_lmdb_data_path = \"/data/kaggle/shared/Data/ffhq_lmdb\"\nbeauty_lmdb_data_path = \"/data/kaggle/shared/Data/meinv_superior_lmdb\"\n\n\ndef data_transforms(img_size):\n SetRange = transforms.Lambda(lambda X: 2 * X - 1.)\n SetScale = transforms.Lambda(lambda X: X / X.sum(0).expand_as(X))\n\n transform = transforms.Compose([transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(148),\n transforms.Resize(img_size),\n transforms.ToTensor(),\n SetRange])\n\n return transform\n\n\ndef get_celebA_dataloader(image_size: int, batch_size: int, split: str) -> dataloader.DataLoader:\n celebA_dataset = CelebA(root=celebA_data_path, split=split, transform=data_transforms(image_size), download=False)\n celebA_dataloader = 
dataloader.DataLoader(celebA_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n return celebA_dataloader\n\n\nffhq_t = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # transforms.Lambda(lambda X: 2 * X - 1.),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n)\n\n\ndef get_ffhq_dataloader(resolution: int, batch_size: int, transform=ffhq_t, split=\"train\"):\n dataset = MultiResolutionDataset(ffhq_lmdb_data_path, transform)\n dataset.resolution = resolution\n length = len(dataset)\n train_size, validate_size = int(0.8 * length), int(0.2 * length)\n train_set, validate_set = random_split(dataset, [train_size + 1, validate_size])\n if split == \"train\":\n return DataLoader(train_set, shuffle=True, batch_size=batch_size, num_workers=4, drop_last=True)\n if split == \"val\":\n return DataLoader(validate_set, shuffle=True, batch_size=batch_size, num_workers=4, drop_last=True)\n return None\n\n\ndef get_beauty_dataloader(resolution: int, batch_size: int, transform=ffhq_t, split=\"train\"):\n dataset = MultiResolutionDataset(beauty_lmdb_data_path, transform)\n dataset.resolution = resolution\n length = len(dataset)\n train_size, validate_size = int(0.9 * length), int(0.1 * length)\n train_set, validate_set = random_split(dataset, [train_size + 1, validate_size])\n if split == \"train\":\n return DataLoader(train_set, shuffle=True, batch_size=batch_size, num_workers=4, drop_last=True)\n if split == \"val\":\n return DataLoader(validate_set, shuffle=True, batch_size=batch_size, num_workers=4, drop_last=True)\n return None\n\nclass MultiResolutionDataset(Dataset):\n def __init__(self, path, transform, resolution=8):\n self.env = lmdb.open(\n path,\n max_readers=32,\n readonly=True,\n lock=False,\n readahead=False,\n meminit=False,\n )\n\n if not self.env:\n raise IOError('Cannot open lmdb dataset', path)\n\n with self.env.begin(write=False) as txn:\n self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8'))\n\n self.resolution = resolution\n self.transform = transform\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n with self.env.begin(write=False) as txn:\n key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8')\n img_bytes = txn.get(key)\n\n buffer = BytesIO(img_bytes)\n img = Image.open(buffer)\n img = self.transform(img)\n\n return img, 1\n\n# Unfinished diffusers experiment: from_pretrained() requires a model id, so the\n# bare module-level call would raise at import time. Kept commented out.\n# from diffusers import PNDMScheduler\n# from diffusers import StableDiffusionPipeline\n# StableDiffusionPipeline.from_pretrained()\n","repo_name":"chengxiaoy/kaleido","sub_path":"data_samples.py","file_name":"data_samples.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27928325617","text":"import tensorflow as tf\n\nsess = tf.Session()\nindices = [[0, 0], [1, 2]]\nvalues = [1, 2]\ndense_shape = [3, 4]\n\ntf_sparse_tensor = tf.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)\nouts = sess.run(tf_sparse_tensor) # SparseTensorValue\n\nprint(outs)\n\n","repo_name":"AugF/semi-gcn","sub_path":"test/demoSparseTensor.py","file_name":"demoSparseTensor.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71967302773","text":"class Solution:\n def sortedSquares(self, A: List[int]) -> List[int]:\n if len(A)<=1:\n return [s**2 for s in A]\n left = []\n right = []\n for num in A:\n if num<=0:\n left.append(num)\n 
else:\n right.append(num)\n ret = []\n i = len(left)-1\n j = 0\n while i>=0 and j 8):\n invalidInput = True\n else:\n invalidInput = False\n except:\n invalidInput = True\n\n if(choice == 1):\n AddItemsToEnd(bookList)\n elif (choice == 2):\n DeleteByName()\n elif (choice == 3):\n DeleteByPos(bookList)\n elif (choice == 4):\n InsertInList()\n elif (choice == 5):\n ClearList()\n elif (choice == 6):\n PrintHorizontally()\n elif (choice == 7):\n SortList()\n else:\n break\n\n time.sleep(1)\n \n\n\ndef DisplayList(aList):\n print(\"\\n-------- List --------\\n\")\n \n index = 1\n for item in aList:\n print(f\"{index}. {item}\")\n index += 1\n\n print()\n\n time.sleep(1)\n\n\ndef AddItemsToEnd(aList):\n DisplayList(aList)\n\n title = input(\"Please enter an item to add to the end of the list!\\n\")\n\n aList.append(title)\n\n DisplayList(aList)\n\n\ndef DeleteByName():\n if (len(bookList) == 0):\n print(\"Book list is empty. Please add books to use this function.\")\n else:\n validInput = False\n while(not validInput):\n DisplayList(bookList)\n\n titleToDelete = input(\"Enter name of book to delete!\\n\")\n\n try:\n bookList.remove(titleToDelete)\n validInput = True\n except:\n print(\"That title is not in your list!\\n\")\n validInput = False\n\n DisplayList(bookList)\n\n \ndef DeleteByPos(aList):\n if (len(aList) == 0):\n print(\"List is empty. Please add books to use this function.\")\n else:\n validInput = False\n while (not validInput):\n DisplayList(aList)\n\n try:\n indexToDelete = int(input(f\"Enter a position to delete. 1-{len(aList)}!\\n\"))\n if(indexToDelete < 1 or indexToDelete > len(aList)):\n print(\"Invalid input!\")\n validInput = False\n else:\n validInput = True\n del aList[indexToDelete - 1]\n except:\n print(\"Invalid input!\")\n validInput = False\n\n DisplayList(aList)\n\ndef InsertInList():\n if (len(bookList) == 0):\n print(\"Book list is empty. Please add books to use this function.\")\n else:\n validInput = False\n while (not validInput):\n DisplayList(bookList)\n\n try:\n indexToInsert = int(input(f\"Enter a position to insert to. 1-{len(bookList)}!\\n\"))\n title = input(f\"Please enter a title for position {indexToInsert}!\\n\")\n\n if (indexToInsert < 1 or indexToInsert > len(bookList)):\n print(\"Invalid input!\")\n validInput = False\n else:\n validInput = True\n bookList.insert(indexToInsert - 1, title)\n \n except:\n print(\"Invalid input!\")\n validInput = False\n\n DisplayList(bookList)\n\n\n\ndef ClearList():\n if (len(bookList) == 0):\n print(\"Book list is empty. Please add books to use this function.\")\n else:\n shouldDelete = input(\"\"\"⚠️ ARE YOU SURE YOU WANT TO CLEAR YOUR LIST? ⚠️\n \nType in 'clear' all lowercase to clear list. Enter to cancel.\n \"\"\")\n\n if(shouldDelete == \"clear\"):\n bookList.clear()\n\n\ndef PrintHorizontally():\n if (len(bookList) == 0):\n print(\"Book list is empty. Please add books to use this function.\")\n else:\n print(\"\\n-------- Book List --------\\n\")\n\n index = 1\n for item in bookList:\n sys.stdout.write(f\"{index}. {item}, \")\n index += 1\n\n print()\n\n time.sleep(1)\n\n\ndef SortList():\n if (len(bookList) == 0):\n print(\"Book list is empty. Please add books to use this function.\")\n else:\n bookList.sort()\n\n DisplayList(bookList)\n\n\ndef InsultMenu():\n choice = 1\n while (True):\n print(\"\"\"\\n------------ Shakespearean Insult Generator ------------\n\n1. Random Insult.\n2. Custom Insult.\n3. Insult list editor.\n4. 
Main Menu.\n\n------------------------------------------------------------\"\"\")\n\n time.sleep(1)\n\n invalidInput = True\n while (invalidInput):\n try:\n choice = int(input(\"Enter 1-4 to choose an option!\\n\"))\n if (choice < 1 or choice > 4):\n invalidInput = True\n else:\n invalidInput = False\n except:\n invalidInput = True\n\n if (choice == 1):\n RandomInsult()\n elif (choice == 2):\n CustomInsult()\n elif (choice == 3):\n InsultEditor()\n else:\n break\n\n time.sleep(1)\n\n\ndef RandomInsult():\n index1 = random.randint(0, len(List1))\n index2 = random.randint(0, len(List2))\n index3 = random.randint(0, len(List3))\n\n print(f\"Thou is a {List1[index1]} {List2[index2]} {List3[index3]}!\")\n\n\ndef CustomInsult():\n print(\"Custom Insult Generator!\")\n\n invalidInput = True\n while (invalidInput):\n try:\n DisplayList(List1)\n choice1 = int(input(f\"Enter 1-{len(List1)} to choose an option!\\n\"))\n if (choice1 < 1 or choice1 > len(List1)):\n invalidInput = True\n else:\n try:\n DisplayList(List2)\n choice2 = int(input(f\"Enter 1-{len(List2)} to choose an option!\\n\"))\n if (choice2 < 1 or choice2 > len(List2)):\n invalidInput = True\n else:\n try:\n DisplayList(List3)\n choice3 = int(input(f\"Enter 1-{len(List3)} to choose an option!\\n\"))\n if (choice3 < 1 or choice3 > len(List3)):\n invalidInput = True\n else:\n invalidInput = False\n except:\n invalidInput = True\n except:\n invalidInput = True\n except:\n invalidInput = True\n\n \n print(f\"Thou is a {List1[choice1 - 1]} {List2[choice2 - 1]} {List3[choice3 - 1]}!\")\n\n\ndef InsultEditor():\n print(\"Insult editor!\")\n invalidInput = True\n while (invalidInput):\n try:\n print(\"\\n------- FIRST LIST -------\\n\")\n DisplayList(List1)\n print(\"\\n------- SECOND LIST -------\\n\")\n DisplayList(List2)\n print(\"\\n------- THIRD LIST -------\\n\")\n DisplayList(List3)\n\n listChoice = int(input(\"Enter 1-3 to choose a list to edit!\\n\"))\n if (listChoice < 1 or listChoice > 3):\n invalidInput = True\n else:\n invalidInput = False\n except:\n invalidInput = True\n\n invalidInput = True\n while (invalidInput):\n try:\n choice = int(input(\"Enter 1 to delete an item or 2 to add one!\\n\"))\n if (choice < 1 or choice > 2):\n invalidInput = True\n else:\n invalidInput = False\n except:\n invalidInput = True\n\n if(choice == 1):\n if(listChoice == 1):\n DeleteByPos(List1)\n elif(listChoice == 2):\n DeleteByPos(List2)\n elif(listChoice == 3):\n DeleteByPos(List3)\n else:\n if(listChoice == 1):\n AddItemsToEnd(List1)\n elif(listChoice == 2):\n AddItemsToEnd(List2)\n elif(listChoice == 3):\n AddItemsToEnd(List3)\n \n\n\nmain()","repo_name":"irridium04/python","sub_path":"Assignments/Assignment4.py","file_name":"Assignment4.py","file_ext":"py","file_size_in_byte":11284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44500051605","text":"# def sum(n):\n# n = 0\n# for i in range (n):\n# return n = n + i + 1\n\n# print(sum(10))\n\n####################################\nresult = 0\nfor i in range(10):\n result = result + i + 1\n print(\"in the {}th iteration, new result = {}\".format(i,result))\n \n#result i new result \n# 0 1 1\n# 1 2 3\n\n# if multiplication, then change initial condition (result) = 1\n\n# SUM OF ODD NUMBERS \nresult = 0\nfor i in range(1,1000,2):\n result = result + i\n\n# SUM OF EVEN NUMBERS \nresult = 0 \nfor i in range(0, 1001, 2):\n result = result + i \n# print(result)\n\n########### WHILE STATEMENT ###########\ndef countdown(n):\n while n > 0:\n 
print(n)\n n = n-1\n print(\"Blastoff!\")\n\n# countdown(10)\n\niteration = 0 \ncount = 0 \nwhile iteration < 5: \n for letter in \"Hello, world\":\n count +=1\n print(\"Iteration \" + str(iteration) + \"; count is: \" + str(count))\n iteration +=1\n\niteration = 0\nwhile iteration < 5:\n count = 0\n for letter in \"hello, world\":\n count += 1\n print(\"Iteration \" + str(iteration) + \"; count is: \" + str(count))\n iteration += 1 \n# count resets to 0\n\niteration = 0\nwhile iteration < 5:\n count = 0\n for letter in \"hello, world\":\n count += 1\n if iteration % 2 == 0:\n break\n print(\"Iteration \" + str(iteration) + \"; count is: \" + str(count))\n iteration += 1 \n\n\nwhile True: \n line = input('> ')\n if line == \"done\":\n break\n print(line)\n\nmysum = 0\nfor i in range(5,11,2):\n mysum += i\n if mysum ==5:\n break\n\n# print(mysum)\n\n\n\n\n","repo_name":"jboenawan/Python-Programming-MIS3640","sub_path":"Session7-Iterations.py","file_name":"Session7-Iterations.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33937032326","text":"from typing import TypeAlias\n\nimport disnake\nfrom disnake.ext import commands, plugins\nfrom helper import CogMetaData, ConVar, DatBot, Settings, bytes2human\nfrom helper.mimic3 import Mimic3Wrapper\n\n# Meta\nmetadata = CogMetaData(\n name=\"mimic3\",\n key=\"mimic3\",\n require_key=True,\n)\nplugin: plugins.Plugin[DatBot] = plugins.Plugin(\n name=metadata.name, logger=f\"cog.{metadata.name}\"\n)\n\n# Aliases\nCmdInter: TypeAlias = disnake.ApplicationCommandInteraction\nGuildInter: TypeAlias = disnake.GuildCommandInteraction\n\n# Context Vars\nm3api: ConVar[Mimic3Wrapper] = ConVar(f\"{metadata.name}.api\")\n\n\n@plugin.load_hook\nasync def cog_load():\n conf: dict = Settings.keys.get(metadata.key)\n wrapper = Mimic3Wrapper(\n await plugin.bot.make_http(metadata.name, base_url=conf[\"url\"]),\n lang=conf.get(\"lang\", \"en_US\"),\n model=conf.get(\"model\", \"vctk_low\"),\n voice=conf.get(\"voice\", \"p376\"),\n )\n m3api.set(wrapper)\n\n\n@commands.slash_command(name=metadata.name)\nasync def cmd(inter: CmdInter):\n plugin.logger.debug(f\"{inter.author.name} @ {inter.guild.name}\")\n\n\n@cmd.sub_command(\"speak\")\nasync def ping(inter: CmdInter, text: str):\n \"\"\"Generates a wav file using the selected voice\n Parameters\n ----------\n text: a body of text to generate\"\"\"\n await inter.response.defer()\n buf, blen = await m3api.get().speak(text)\n await inter.send(\n f\"Size: '{bytes2human(blen)}'\", file=disnake.File(buf, \"output.wav\")\n )\n\n\nsetup, teardown = plugin.create_extension_handlers()\n","repo_name":"Stinky-c/DatBot","sub_path":"src/cogs/mimic3_cog.py","file_name":"mimic3_cog.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"25184396320","text":"import tkinter as tk\nfrom Source.Controller.controller import *\nfrom Source.View.Components.backButton import *\n\ndef interface_transaction():\n trUI = tk.Toplevel()\n\n def clear():\n # Function to clear input fields\n title_input.delete(0, \"end\")\n value_input.delete(0, \"end\")\n category_input.delete(0, \"end\")\n\n trUI.title(\"Adicionar Transação\")\n trUI.geometry(\"800x800\")\n trUI.config(padx=10, pady=100)\n trUI.resizable(width=False, height=False)\n\n header_label = tk.Label(trUI, text=\"Preencha todos os campos para adicionar a sua transação.\", 
font=(\"Calibri\", 16, \"italic\"))\n header_label.pack()\n\n title_label = tk.Label(trUI, text=\"Título\", font=(\"Calibri\", 14))\n title_label.pack(pady=10)\n title_input = tk.Entry(trUI, text=\"title\", width=30, font=(\"Calibri\", 12))\n title_input.pack(pady=5)\n\n value_label = tk.Label(trUI, text=\"Valor\", font=(\"Calibri\", 14))\n value_label.pack(pady=10)\n value_input = tk.Entry(trUI, text=\"value\", width=30, font=(\"Calibri\", 12))\n value_input.pack(pady=5)\n\n category_label = tk.Label(trUI, text=\"Tipologia\", font=(\"Calibri\", 14))\n category_label.pack(pady=10)\n category_input = tk.Entry(trUI, text=\"tipologia\", width=30, font=(\"Calibri\", 12))\n category_input.pack(pady=5)\n\n atentation_label = tk.Label(trUI, text=\"[DICA]\\n\\nNo campo TIPOLOGIA, coloque a referência que caracterize essa transação\\n Por exemplo: Receita, Despesas, Gastos, Ganhos, e etc.\", font=(\"Calibri\", 12, \"italic\"))\n atentation_label.pack()\n\n add_transaction_btn = tk.Button(\n trUI,\n text=\"Adicionar Transação\",\n width=36,\n pady=15,\n bg=\"#5f9ea0\",\n fg=\"white\",\n font=(\"Calibri\", 12, \"bold\"),\n command=lambda: add_transaction(title=title_input.get(), value=value_input.get(), category=category_input.get(), func_clear=clear)\n )\n add_transaction_btn.pack(pady=40)\n\n back_button(trUI, trUI.destroy)\n","repo_name":"jotadevss/GFinance","sub_path":"Source/View/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70130095412","text":"from playwright.async_api import Page\nfrom playwright_stealth import StealthConfig, stealth_async\nfrom wrighter.plugin import Plugin, context\n\n\nclass StealthAsync(Plugin):\n \"\"\"Apply stealth to pages\"\"\"\n\n def __init__(self, stealth_config: StealthConfig | None = None) -> None:\n self.stealth_config = stealth_config if stealth_config is not None else StealthConfig()\n super().__init__()\n\n @context(\"on\", \"page\")\n async def context_on_page(self, page: Page) -> None:\n self.logger.debug(f\"Applied stealth to {page}\")\n await stealth_async(page, config=self.stealth_config)\n\n\n__all__ = [\"StealthAsync\"]\n","repo_name":"zigai/wrighter-plugins","sub_path":"wrighter_plugins/stealth_async.py","file_name":"stealth_async.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24446975862","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom fastsam import FastSAM, FastSAMPrompt\nimport resources as res\n\n\nsys.path.append(\"..\")\n\n# Defining sam model\nsam_checkpoint = res.find('other/FastSAM-x.pt')\nmodel_type = \"vit_h\"\nDEVICE = \"cpu\"\nmodel = FastSAM(sam_checkpoint)\n\ndef do_sam(IMAGE_PATH, input_points, input_labels):\n # input_point = np.array([[x, y]])\n # input_label = np.array([1])\n\n everything_results = model(IMAGE_PATH, device=DEVICE, retina_masks=True, conf=0.4, iou=0.9, )\n prompt_process = FastSAMPrompt(IMAGE_PATH, everything_results, device=DEVICE)\n\n ann = prompt_process.point_prompt(points=input_points, pointlabel=input_labels)\n\n return prompt_process, ann\n\ndef create_mask_image(ann, output):\n # New array to store RGB values\n mask = np.zeros((400, 300, 3), dtype=np.uint8)\n\n # Generate a random RGB color\n random_color = np.random.randint(0, 256, 3)\n\n # Replace False values with black and True values with random color\n 
mask[ann.squeeze()] = random_color\n mask[~ann.squeeze()] = [0, 0, 0]\n\n return mask\n\n","repo_name":"s-du/SkyStock","sub_path":"pointify_engine/fastdetector.py","file_name":"fastdetector.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"27706126056","text":"# -*- coding: utf-8 -*-\n\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl import Unauthorized\nfrom collective.contact.plonegroup.utils import get_organization\nfrom dexterity.localrolesfield.field import LocalRoleField\nfrom imio.history.interfaces import IImioHistory\nfrom imio.history.utils import getLastAction\nfrom imio.history.utils import getLastWFAction\nfrom imio.prettylink.interfaces import IPrettyLink\nfrom plone import api\nfrom plone.app.textfield import RichText\nfrom plone.dexterity.content import Container\nfrom plone.dexterity.schema import DexteritySchemaPolicy\nfrom plone.directives import form\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.PloneMeeting.config import PMMessageFactory as _\nfrom Products.PloneMeeting.interfaces import IDXMeetingContent\nfrom Products.PloneMeeting.utils import findMeetingAdvicePortalType\nfrom Products.PloneMeeting.utils import get_event_field_data\nfrom Products.PloneMeeting.utils import getWorkflowAdapter\nfrom Products.PloneMeeting.utils import historize_object_data\nfrom Products.PloneMeeting.utils import isModifiedSinceLastVersion\nfrom Products.PloneMeeting.widgets.pm_richtext import PMRichTextFieldWidget\nfrom z3c.form.browser.radio import RadioFieldWidget\nfrom zope import schema\nfrom zope.component import getAdapter\nfrom zope.i18n import translate\nfrom zope.interface import implements\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom zope.schema.vocabulary import SimpleTerm\nfrom zope.schema.vocabulary import SimpleVocabulary\n\n\nclass IMeetingAdvice(IDXMeetingContent):\n \"\"\"\n MeetingAdvice schema\n \"\"\"\n\n advice_group = LocalRoleField(\n title=_(u'title_advice_group'),\n description=_(u\"Choose a group.\"),\n vocabulary=u'Products.PloneMeeting.content.advice.advice_group_vocabulary',\n required=True,\n )\n advice_type = schema.Choice(\n title=_(u'title_advice_type'),\n description=_(u\"Choose an advice type.\"),\n vocabulary=u'Products.PloneMeeting.content.advice.advice_type_vocabulary',\n required=True,\n )\n form.widget('advice_hide_during_redaction', RadioFieldWidget)\n advice_hide_during_redaction = schema.Bool(\n title=_(u'title_advice_hide_during_redaction'),\n description=_(\"If you do not want the advice to be shown immediately after redaction, you can check this \"\n \"box. This will let you or other member of your group work on the advice before showing it. \"\n \"Note that if you lose access to the advice (for example if the item state evolve), \"\n \"the advice will be considered 'Not given, was under edition'. 
A manager will be able \"\n \"to publish it nevertheless.\"),\n required=False,\n )\n form.widget('advice_comment', PMRichTextFieldWidget)\n advice_comment = RichText(\n title=_(u\"title_advice_comment\"),\n description=_(\"Enter the official comment.\"),\n required=False,\n allowed_mime_types=(u\"text/html\", )\n )\n form.widget('advice_observations', PMRichTextFieldWidget)\n advice_observations = RichText(\n title=_(u\"title_advice_observations\"),\n description=_(\"Enter optionnal observations if necessary.\"),\n required=False,\n allowed_mime_types=(u\"text/html\", )\n )\n advice_reference = schema.TextLine(\n title=_(u\"title_advice_reference\"),\n description=_(\"Enter a reference for this advice if necessary.\"),\n required=False,\n )\n form.mode(advice_row_id='hidden')\n advice_row_id = schema.TextLine(\n title=_(u\"title_advice_row_id\"),\n description=_(\"Linked advice row id, this is managed programmatically.\"),\n required=False,\n )\n\n\n@form.default_value(field=IMeetingAdvice['advice_type'])\ndef advice_typeDefaultValue(data):\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(data.context)\n # manage when portal_type accessed from the Dexterity types configuration\n return cfg and cfg.getDefaultAdviceType() or ''\n\n\n@form.default_value(field=IMeetingAdvice['advice_hide_during_redaction'])\ndef advice_hide_during_redactionDefaultValue(data):\n published = data.context.REQUEST.get('PUBLISHED')\n if not published:\n return False\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(data.context)\n # manage when portal_type accessed from the Dexterity types configuration\n hidden = cfg and published.ti.id in cfg.getDefaultAdviceHiddenDuringRedaction() or False\n if hidden:\n api.portal.show_message(_(\"advice_hide_during_redaction_set_auto_to_true\"),\n request=data.context.REQUEST)\n return hidden\n\n\ndef get_advice_label(advice_info):\n \"\"\"Render an advice label useable in several places.\"\"\"\n res = advice_info[\"name\"]\n if advice_info[\"delay\"] and advice_info[\"delay_label\"]:\n res = u\"{0} - {1}\".format(res, safe_unicode(advice_info[\"delay_label\"]))\n return res\n\n\nclass MeetingAdvice(Container):\n \"\"\" \"\"\"\n\n implements(IMeetingAdvice)\n # avoid inherited roles from the item or the item editor may edit the advice...\n __ac_local_roles_block__ = True\n\n security = ClassSecurityInfo()\n\n def getPrettyLink(self, **kwargs):\n \"\"\"Return the IPrettyLink version of the title.\"\"\"\n adapted = IPrettyLink(self)\n adapted.showContentIcon = True\n for k, v in kwargs.items():\n setattr(adapted, k, v)\n return adapted.getLink()\n\n def Title(self):\n '''\n This will construct the title of the advice, moreover, it checks for access\n to a confidential advice.\n '''\n # check that current user is not accessing to an advice that is confidential\n # to him but for which he knows the url to access to...\n parent = self.getParentNode()\n # in some case with plone.restapi summary serialize,\n # the parent is not found because self does not have acquisition\n if not parent:\n return \"\"\n if self.advice_group in parent.adviceIndex \\\n and parent.adviceIndex[self.advice_group]['isConfidential']:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n is_confidential_power_observer = tool.isPowerObserverForCfg(\n cfg, cfg.getAdviceConfidentialFor())\n if not parent._adviceIsViewableForCurrentUser(\n cfg,\n is_confidential_power_observer,\n 
parent.adviceIndex[self.advice_group]):\n raise Unauthorized\n\n # when creating a new advice object, it still not exist in parent's adviceIndex\n label = u\"\"\n if self.advice_group in parent.adviceIndex:\n label = get_advice_label(parent.adviceIndex[self.advice_group])\n # we can not return a translated msg using _ so translate it\n return translate(\n \"Advice ${advice_label} given on item ${item_title}\",\n mapping={'item_title': unicode(parent.Title(), 'utf-8'),\n 'advice_label': label},\n domain=\"PloneMeeting\",\n default=\"Advice given on item\",\n context=self.REQUEST)\n\n def title_or_id(self):\n \"\"\" \"\"\"\n return self.Title()\n\n def query_state(self):\n '''In what state am I ?'''\n wfTool = api.portal.get_tool('portal_workflow')\n return wfTool.getInfoFor(self, 'review_state')\n\n security.declarePublic('wfConditions')\n\n def wfConditions(self):\n '''Returns the adapter that implements the interface that proposes\n methods for use as conditions in the workflow associated with this\n item.'''\n return getWorkflowAdapter(self, conditions=True)\n\n security.declarePublic('wfActions')\n\n def wfActions(self):\n '''Returns the adapter that implements the interface that proposes\n methods for use as actions in the workflow associated with this\n item.'''\n return getWorkflowAdapter(self, conditions=False)\n\n def _updateAdviceRowId(self):\n '''Make sure advice_row_id is correct.'''\n # the row_id is stored in parent (item) adviceIndex\n item = self.getParentNode()\n\n # if a powerAdviser is adding an advice, the advice_group is not\n # in the item.adviceIndex, so if not found, check that\n if self.advice_group in item.adviceIndex:\n adviceInfo = item.adviceIndex[self.advice_group]\n row_id = adviceInfo['row_id']\n else:\n # check if it is actually a power adviser adding a not asked advice\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if self.advice_group in cfg.getPowerAdvisersGroups():\n row_id = ''\n else:\n raise KeyError('Not able to find a value to set for advice row_id!')\n self.advice_row_id = row_id\n\n def get_advice_given_on(self):\n '''Return the date the advice was given on.\n Returns the smallest date between modified() and last event 'giveAdvice'.\n This manages case when advice is edited after it is given, for example\n when a MeetingManager corrects a typo, the advice_given_on date will be\n the 'giveAdvice' date.'''\n lastEvent = getLastWFAction(self, 'giveAdvice')\n modified = self.modified()\n if not lastEvent:\n return modified\n else:\n return min(lastEvent['time'], modified)\n\n def historize_if_relevant(self, comment):\n \"\"\"Historize if self was never historized or\n if it was modified since last version.\"\"\"\n # only historize advice if it was modified since last historization\n # and if it is not 'asked_again', indeed we do not historize an advice\n # that is 'asked_again' of it's predecessor would be an advice 'asked_again' too...\n if self.advice_type != 'asked_again' and isModifiedSinceLastVersion(self):\n historize_object_data(self, comment=comment)\n\n # def attribute_is_used_cachekey(method, self, name):\n # '''cachekey method for self.attribute_is_used.'''\n # return \"{0}.{1}\".format(self.portal_type, name)\n\n security.declarePublic('attribute_is_used')\n\n # @ram.cache(attribute_is_used_cachekey)\n def attribute_is_used(self, name):\n '''Necessary for utils._addManagedPermissions for advice for now\n any attribute is used ?'''\n return True\n\n def getIndexesRelatedTo(self, related_to='annex', 
check_deferred=True):\n '''See doc in interfaces.py.'''\n idxs = ['SearchableText']\n return idxs\n\n security.declarePublic('adapted')\n\n def adapted(self):\n '''Make adapted method available on advice, but actually no adapter\n can be defined, just return self.'''\n return self\n\n def get_previous_advice_type(self):\n \"\"\" \"\"\"\n adapter = getAdapter(self, IImioHistory, 'advice_given')\n last_event = getLastAction(adapter)\n prev_advice_type = None\n if last_event:\n prev_advice_type = get_event_field_data(\n last_event[\"advice_data\"], \"advice_type\")\n return prev_advice_type\n\n\nclass MeetingAdviceSchemaPolicy(DexteritySchemaPolicy):\n \"\"\" \"\"\"\n\n def bases(self, schemaName, tree):\n return (IMeetingAdvice, )\n\n\nclass AdviceGroupVocabulary(object):\n implements(IVocabularyFactory)\n\n def __call__(self, context):\n \"\"\"\"\"\"\n terms = []\n tool = api.portal.get_tool('portal_plonemeeting')\n advicePortalTypeIds = tool.getAdvicePortalTypeIds()\n\n # take into account groups for wich user can add an advice\n # while adding an advice, the context is his parent, aka a MeetingItem\n alterable_advice_org_uids = []\n if context.meta_type == 'MeetingItem':\n alterable_advice_org_uids = context.getAdvicesGroupsInfosForUser(compute_to_edit=False)[0]\n # take into account groups for which user can edit an advice\n elif context.portal_type in advicePortalTypeIds:\n alterable_advice_org_uids = context.getAdvicesGroupsInfosForUser(compute_to_add=False)[1]\n # make sure advice_group selected on advice is in the vocabulary\n if context.advice_group not in alterable_advice_org_uids:\n alterable_advice_org_uids.append(context.advice_group)\n\n # manage case where we have several meetingadvice portal_types\n # depending on current portal_type, clean up selectable orgs\n itemObj = context.meta_type == 'MeetingItem' and context or context.getParentNode()\n current_portal_type = findMeetingAdvicePortalType(context)\n alterable_advice_org_uids = [\n org_uid for org_uid in alterable_advice_org_uids\n if (itemObj.adapted()._advicePortalTypeForAdviser(org_uid) == current_portal_type or\n (context.portal_type in advicePortalTypeIds and org_uid == context.advice_group))]\n\n # create vocabulary\n for alterable_advice_org_uid in alterable_advice_org_uids:\n org = get_organization(alterable_advice_org_uid)\n terms.append(SimpleTerm(alterable_advice_org_uid,\n alterable_advice_org_uid,\n org.get_full_title()))\n return SimpleVocabulary(terms)\n\n\nclass AdviceTypeVocabulary(object):\n implements(IVocabularyFactory)\n\n def __call__(self, context):\n \"\"\" \"\"\"\n terms = []\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(context)\n advicePortalTypeIds = tool.getAdvicePortalTypeIds()\n\n # manage when portal_type accessed from the Dexterity types configuration\n if cfg:\n usedAdviceTypes = list(cfg.getUsedAdviceTypes())\n\n # now wipeout usedAdviceTypes depending on current meetingadvice portal_type\n itemObj = context.meta_type == 'MeetingItem' and context or context.getParentNode()\n current_portal_type = findMeetingAdvicePortalType(context)\n usedAdviceTypes = [\n usedAdviceType for usedAdviceType in usedAdviceTypes\n if usedAdviceType in itemObj.adapted()._adviceTypesForAdviser(current_portal_type)]\n\n # make sure if an adviceType was used for context and it is no more available, it\n # appears in the vocabulary and is so useable...\n if context.portal_type in advicePortalTypeIds and \\\n context.advice_type not in usedAdviceTypes:\n 
usedAdviceTypes.append(context.advice_type)\n for advice_id, advice_title in cfg.listAdviceTypes(include_asked_again=True).items():\n if advice_id in usedAdviceTypes:\n terms.append(SimpleTerm(advice_id, advice_id, advice_title))\n return SimpleVocabulary(terms)\n","repo_name":"IMIO/Products.PloneMeeting","sub_path":"src/Products/PloneMeeting/content/advice.py","file_name":"advice.py","file_ext":"py","file_size_in_byte":15028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5030063679","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Preprocessing\n\n# In[1]:\n\n\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem import WordNetLemmatizer\nwordnet_lemmatizer = WordNetLemmatizer()\nfrom nltk.stem.porter import PorterStemmer\nporter_stemmer = PorterStemmer()\nimport re\nimport inflect\ndef Pre_Processing(file):\n token_files=[]\n after_lower=[]\n after_lemmatizer=[]\n after_stemming=[]\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens=(tokenizer.tokenize(file))\n p = inflect.engine()\n token_files=[]\n for i in range(len(tokens)):\n if tokens[i].isnumeric() and len(tokens[i])<36:\n tem=p.number_to_words((tokens[i]))\n tokenizer = RegexpTokenizer(r'\\w+')\n temp=(tokenizer.tokenize(tem))\n for x in temp:\n token_files.append(x)\n elif tokens[i].isnumeric() and len(tokens[i])>36:\n for j in range(len(tokens[i])):\n token_files.append(p.number_to_words((tokens[i][j])))\n else:\n token_files.append(tokens[i])\n for i in range(len(token_files)):\n after_lower.append(token_files[i].lower())\n for i in range(len(after_lower)):\n after_stemming.append(porter_stemmer.stem(after_lower[i]))\n return after_stemming\n\n\n# # TF_IDF Calculation\n\n# In[2]:\n\n\nimport os\nimport codecs\ndoc=0\nTF_Dictionary={}\nparent=\"C:/Users/Devashi Jain/Desktop/Information Retrieval/Assignment2/stories/stories\"\nfor file_name in os.listdir(os.path.join(parent)): \n preprossed_file=[]\n if os.path.isdir(parent+'/'+file_name):\n for file in os.listdir(os.path.join(parent,file_name)):\n if file=='index.html':\n print(\"no\")\n else:\n fd=codecs.open(parent+'/'+file_name+'/'+file,'r',errors='ignore',encoding='utf-8')\n preprossed_file=Pre_Processing(fd.read())\n for i in range(len(preprossed_file)):\n if preprossed_file[i] in TF_Dictionary:\n if file in TF_Dictionary[preprossed_file[i]]:\n TF_Dictionary[preprossed_file[i]][file]+=1\n else:\n TF_Dictionary[preprossed_file[i]][file]=1\n else:\n TF_Dictionary[preprossed_file[i]] = {file:1}\n for term in TF_Dictionary:\n for j in TF_Dictionary[term]:\n if(j==file):\n TF_Dictionary[term][j]/=len(preprossed_file)\n\n else:\n if file_name=='index.html':\n print(\"no\")\n else:\n fd=codecs.open(parent+'/'+file_name,'r',errors='ignore',encoding='utf-8')\n preprossed_file=Pre_Processing(fd.read())\n for i in range(len(preprossed_file)):\n if preprossed_file[i] in TF_Dictionary:\n if file_name in TF_Dictionary[preprossed_file[i]]:\n TF_Dictionary[preprossed_file[i]][file_name]+=1\n else:\n TF_Dictionary[preprossed_file[i]][file_name]=1\n else:\n TF_Dictionary[preprossed_file[i]] = {file_name:1}\n for term in TF_Dictionary:\n for j in TF_Dictionary[term]:\n if(file_name==j):\n TF_Dictionary[term][j]/=len(preprossed_file)\n\n\n# In[3]:\n\n\nimport math\nfor term in TF_Dictionary:\n IDF=1+math.log(467/len(TF_Dictionary[term]))\n for file in TF_Dictionary[term]:\n TF_Dictionary[term][file]*=IDF\n\n\n# In[4]:\n\n\nimport pandas as 
pd\nFile_title=pd.DataFrame()\nFile_Title_SRE=pd.DataFrame()\nFile_title=pd.read_html('C:/Users/Devashi Jain/Desktop/Information Retrieval/Assignment2/stories/stories/index.html', flavor='bs4')\nFile_Title_SRE=pd.read_html('C:/Users/Devashi Jain/Desktop/Information Retrieval/Assignment2/stories/stories/SRE/index.html',flavor='bs4')\n\n\n# In[5]:\n\n\nTitle=pd.concat([File_title[0],File_Title_SRE[0]])\nTitle=Title.drop([1],axis=1)\nTitle=Title.reset_index(drop=True)\nTitle=Title.drop([0,1,2,3,452])\nTitle=Title.reset_index(drop=True)\nTitle\n\n\n# In[6]:\n\n\nTitle_Dictionary={}\nfor i in range(len(Title)):\n Title_Dictionary.update({Title[0][i]:Title[2][i]})\n\n\n# In[7]:\n\n\nTF_Title_Dictionary={}\nfor term in Title_Dictionary:\n preprossed_file=Pre_Processing(Title_Dictionary[term])\n for i in range(len(preprossed_file)):\n if preprossed_file[i] in TF_Title_Dictionary:\n if term in TF_Title_Dictionary[preprossed_file[i]]:\n TF_Title_Dictionary[preprossed_file[i]][term]+=1\n else:\n TF_Title_Dictionary[preprossed_file[i]][term]=1\n else:\n TF_Title_Dictionary[preprossed_file[i]] = {term:1}\n for word in TF_Title_Dictionary:\n for file in TF_Title_Dictionary[word]:\n if(file==term):\n TF_Title_Dictionary[word][file]/=len(preprossed_file)\n\n\n# In[8]:\n\n\nimport math\nfor term in TF_Title_Dictionary:\n p = len(TF_Title_Dictionary[term])\n IDF=1+math.log(467/p)\n for file in TF_Title_Dictionary[term]:\n TF_Title_Dictionary[term][file]*=IDF\n\n\n# # QueryProcessing and Top k documents\n\n# In[11]:\n\n\nprint(\"Enter the Query\")\nquery=input()\nDocument={}\npreprossed_query=Pre_Processing(query)\nfor term in preprossed_query:\n if term in TF_Title_Dictionary:\n for key in TF_Title_Dictionary[term].keys():\n if key in Document:\n Document[key]+=TF_Title_Dictionary[term][key]\n else:\n Document[key]=TF_Title_Dictionary[term][key] \n if term in TF_Dictionary:\n for key in TF_Dictionary[term].keys():\n if key in Document:\n Document[key]+=TF_Dictionary[term][key]\n else:\n Document[key]= TF_Dictionary[term][key]\ntopk=(sorted(Document.items(), key = lambda kv:(kv[1], kv[0] ),reverse=True)) \nprint(len(topk))\nprint(\"enter the top doc\")\nt=int(input())\nif t2->4, 1->3->4\n输出:1->1->2->3->4->4\n\n21. Merge Two Sorted Lists\nMerge two sorted linked lists and return it as a new list. 
The new list should be made by splicing together the nodes of the first two lists.\n\nExample:\n\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n'''\n\n\n# 递归\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n # if not l1:\n # return l2\n # if not l2:\n # return l1\n # if l1.val <= l2.val:\n # l1.next = self.mergeTwoLists(l1.next, l2)\n # return l1\n # elif l1.val > l2.val:\n # l2.next = self.mergeTwoLists(l1, l2.next)\n # return l2\n\n if l1 and l2:\n if l1.val < l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n return l1 if not l2 else l2\n\n\n# 迭代\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution1(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n if not l1:\n return l2\n if not l2:\n return l1\n pre = ListNode(0)\n cur = pre\n while l1 and l2:\n if l1.val <= l2.val:\n cur.next = l1\n l1 = l1.next\n else:\n cur.next = l2\n l2 = l2.next\n cur = cur.next\n\n cur.next = l1 if l1 else l2\n return pre.next\n\n\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n new_head = ListNode(0)\n dummy = new_head\n head1 = list1\n head2 = list2\n while head1 or head2:\n value = 0\n if head1 and head2:\n if head1.val < head2.val:\n value = head1.val \n head1 = head1.next\n else:\n value = head2.val\n head2 = head2.next\n elif head1:\n value = head1.val\n head1 = head1.next\n else:\n value = head2.val\n head2 = head2.next\n new_node = ListNode(val=value)\n new_head.next = new_node\n new_head = new_head.next\n # print(dummy)\n return dummy.next\n\n","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/linked_list/leetcode-21-MergeTwoSortedLists.py","file_name":"leetcode-21-MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37808187030","text":"# This script is to get the Spotipy data from the Spotipy API\n# and write it in a appropriate format to a CSV file\n\n# Importing the libraries\nimport spotipy, json, csv\nfrom secrets import *\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\n#Authentication - without user\nclient_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n\n# Playlist IDs\nplaylist_ids = [\n '75kU3tpSUe0Z487yLZ54kn?si=521c58145365425f', # liked songs\n '5XmE92oPSOfBf7v11zIVsW?si=07f9320cfbe24c5e' # unliked songs\n]\n\n# Audio features\nfeature_names = [\n 'acousticness',\n 'danceability',\n 'duration_ms',\n 'energy',\n 'instrumentalness',\n 'key',\n 'liveness',\n 'loudness',\n 'mode',\n 'speechiness',\n 'tempo',\n 'time_signature',\n 'valence'\n]\n\n# Function to get all the tracks of a playlist\ndef get_playlist_tracks(playlist_id):\n tracks_response = sp.playlist_items(playlist_id)['tracks']\n tracks = tracks_response['items']\n while tracks_response['next']:\n 
tracks_response = sp.next(tracks_response)\n tracks.extend(tracks_response['items'])\n return tracks\n\n# Function to get the features of a track\ndef get_features(track_id):\n features_response = sp.audio_features(track_id)\n features_json = json.dumps(features_response)\n features_data = json.loads(features_json)\n features_values = []\n for feature in feature_names:\n features_values.append(features_data[0][feature])\n return features_values\n\n# Write data to CSV file\ndata_file = open('data.csv', 'w')\nwriter = csv.writer(data_file)\n\n# Write the header\nwriter.writerow(['track_num', 'track_id', 'track_name', 'first_artist'] + feature_names + ['liked'])\n\nprint(\"Writing to the CSV file:\")\nprint(\"# Track\")\nprint(\"----------------------------\")\n\nrow_num = 1\nfor playlist_id in playlist_ids:\n tracks = get_playlist_tracks(playlist_id)\n for track in tracks:\n track_id = track['track']['id']\n track_name = track['track']['name']\n first_artist = track['track']['artists'][0]['name']\n features = get_features(track_id)\n try:\n if playlist_id == playlist_ids[0]:\n writer.writerow([row_num, track_id, track_name, first_artist] + features + [1])\n else:\n writer.writerow([row_num, track_id, track_name, first_artist] + features + [0])\n print(str(row_num) + \". \" + track_name)\n row_num += 1\n except:\n print(\"error in csv writing\")\n\nprint(\"\\nFile is ready.\")\n\n# Closing the file\ndata_file.close()\n","repo_name":"ataarslaner/Spotify-Song-Recommendation-System","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22386540855","text":"from flask import Flask\nfrom sqlalchemy import create_engine, Column, Integer, String, Table, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship, backref\n\napp = Flask(__name__)\napp.config.from_object('config')\nengine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], max_overflow=5)\nBase = declarative_base()\n\narticle_tag = Table(\n 'article_tag',\n # 组合主键\n Base.metadata,\n Column('article_id', Integer, ForeignKey('article.id'), primary_key=True),\n Column('tag_id', Integer, ForeignKey('tag.id'), primary_key=True)\n)\n\nclass Article(Base):\n __tablename__ = 'article'\n id = Column(Integer, primary_key=True, autoincrement=True)\n title = Column(String(100), nullable=False)\n\n tags = relationship('Tag', secondary=article_tag, backref=backref('articles')) # 关联关系设置\n\nclass Tag(Base):\n __tablename__ = 'tag'\n id = Column(Integer, primary_key=True, autoincrement=True)\n name = Column(String(100), nullable=False)\n\nBase.metadata.create_all(engine)\n\n@app.route('/')\ndef index():\n\n Session = sessionmaker(bind=engine)\n session =Session()\n\n # article1 = Article(title='aaa')\n # article2 = Article(title='bbb')\n\n # tag1 = Tag(name='111')\n # tag2 = Tag(name='222')\n\n # 多对多关系建立\n # article1.tags.append(tag1)\n # article1.tags.append(tag2)\n # article2.tags.append(tag1)\n # article2.tags.append(tag2)\n\n # session.add(article1)\n # session.add(article2)\n # session.add(tag1)\n # session.add(tag2)\n\n # session.commit()\n\n article1 = session.query(Article).filter_by(title='aaa').first()\n tags = article1.tags\n for tag in tags:\n print(tag.name)\n\n return 'hello world'\n\n\nif __name__ == '__main__':\n 
app.run()","repo_name":"flyingtothe/flask_study","sub_path":"app_10_db4.py","file_name":"app_10_db4.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1872420716","text":"from flask import Blueprint, render_template, url_for, redirect, request, flash\nfrom flask.views import View, MethodView\nfrom forms.bill_form import BillerForm, ScheduleForm\n\nbill = Blueprint('bill', __name__)\n\n\nclass BillPayView(View):\n methods = ['POST', 'GET']\n decorators = []\n\n def dispatch_request(self):\n form = BillerForm()\n dd_form = ScheduleForm()\n if request.method == 'POST':\n print(\"inside the post\")\n if not form.validate_on_submit():\n print(\"please enter the correct details\")\n return redirect(url_for('bill.bill_pay'))\n else:\n print(\"valid entries\")\n account = request.form['account']\n amount = request.form['amount']\n bill_type = request.form['billType']\n print(f\" account is : {account} and the amount is {amount} and the biller type is {bill_type}\")\n return redirect(url_for('bill.bill_pay'))\n return render_template('bill_payment.html', form=form, ddform=dd_form)\n\n\nclass PaymentScheduleView(View):\n methods = ['POST', 'GET']\n decorators = []\n\n def dispatch_request(self):\n dd_form = ScheduleForm(request.form)\n form = BillerForm()\n\n if request.method == 'POST':\n biller = dd_form.biller.data\n service_account = dd_form.service_account.data\n frequency = dd_form.frequency.data\n start_date = dd_form.start_date.data\n amount = dd_form.amount.data\n accept_toc = dd_form.accept_toc.data\n\n flash(\"form validated well\")\n return redirect(url_for('bill.schedule_pay'))\n return render_template('bill_payment.html', form=form, ddform=dd_form)\n\n\nbill.add_url_rule('/bill/', view_func=BillPayView.as_view('bill_pay'))\nbill.add_url_rule('/bill/schedule_payment', view_func=PaymentScheduleView.as_view('schedule_pay'))\n","repo_name":"trustmub/ebanking","sub_path":"views/bill.py","file_name":"bill.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"882804591","text":"from __future__ import division\r\nimport pygame\r\nimport random\r\nfrom os import path\r\n\r\n# Assets\r\nimg_dir = path.join(path.dirname(__file__), 'assets')\r\n\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\nFPS = 60\r\nPOWERUP_TIME = 5000\r\nBAR_LENGTH = 100\r\nBAR_HEIGHT = 10\r\n\r\n# Colors\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nWIN_GREEN = (51, 102, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\n\r\n# Init\r\npygame.init()\r\npygame.mixer.init()\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption(\"Commet Runner\")\r\nclock = pygame.time.Clock()\r\n\r\nfont_name = pygame.font.match_font('arial')\r\n\r\n\r\n# Won game\r\ndef won_menu():\r\n global screen\r\n\r\n pygame.display.update()\r\n\r\n screen.fill(WIN_GREEN)\r\n draw_text(screen, \"The traveler has arrived his destination.\",\r\n 40, WIDTH/2, HEIGHT/2 - 20)\r\n draw_text(screen, \"Congratilations!\", 40, WIDTH/2, HEIGHT/2 + 20)\r\n pygame.display.update()\r\n\r\n\r\n# Win menu\r\ndef win_menu():\r\n global screen\r\n\r\n pygame.display.update()\r\n\r\n while True:\r\n ev = pygame.event.poll()\r\n if ev.type == pygame.KEYDOWN:\r\n if ev.key == pygame.K_RETURN:\r\n break\r\n elif ev.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n else:\r\n draw_text(screen, \"You 
won the level\", 30, WIDTH/2, HEIGHT/2)\r\n            draw_text(screen, \"Press [ENTER]\", 30, WIDTH/2, HEIGHT/2 + 40)\r\n            pygame.display.update()\r\n\r\n    pygame.display.update()\r\n\r\n\r\n# First menu\r\ndef main_menu():\r\n    global screen\r\n\r\n    pygame.display.update()\r\n\r\n    while True:\r\n        ev = pygame.event.poll()\r\n        if ev.type == pygame.KEYDOWN:\r\n            if ev.key == pygame.K_RETURN:\r\n                break\r\n        elif ev.type == pygame.QUIT:\r\n            pygame.quit()\r\n            quit()\r\n        else:\r\n            draw_text(screen, \"Press [ENTER]\", 30, WIDTH/2, HEIGHT/2)\r\n            pygame.display.update()\r\n\r\n    screen.fill(BLACK)\r\n    draw_text(screen, \"Level 1\", 40, WIDTH/2, HEIGHT/2)\r\n    pygame.display.update()\r\n\r\n\r\ndef lvl2_menu():\r\n    global screen\r\n\r\n    pygame.display.update()\r\n\r\n    screen.fill(BLACK)\r\n    draw_text(screen, \"Level 2\", 40, WIDTH/2, HEIGHT/2)\r\n    pygame.display.update()\r\n\r\n\r\ndef lvl3_menu():\r\n    global screen\r\n\r\n    pygame.display.update()\r\n\r\n    screen.fill(BLACK)\r\n    draw_text(screen, \"Level 3\", 40, WIDTH/2, HEIGHT/2)\r\n    pygame.display.update()\r\n\r\n\r\n# Score\r\ndef draw_text(surf, text, size, x, y):\r\n    font = pygame.font.Font(font_name, size)\r\n    text_surface = font.render(text, True, WHITE)\r\n    text_rect = text_surface.get_rect()\r\n    text_rect.midtop = (x, y)\r\n    surf.blit(text_surface, text_rect)\r\n\r\n# Shield Bar\r\n\r\n\r\ndef draw_shield_bar(surf, x, y, pct):\r\n    pct = max(pct, 0)\r\n    fill = (pct / 100) * BAR_LENGTH\r\n    outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\r\n    fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)\r\n    pygame.draw.rect(surf, GREEN, fill_rect)\r\n    pygame.draw.rect(surf, WHITE, outline_rect, 2)\r\n\r\n# Lives\r\n\r\n\r\ndef draw_lives(surf, x, y, lives, img):\r\n    for i in range(lives):\r\n        img_rect = img.get_rect()\r\n        img_rect.x = x + 30 * i\r\n        img_rect.y = y\r\n        surf.blit(img, img_rect)\r\n\r\n\r\n# New mob\r\ndef newmob():\r\n    mob_element = Mob()\r\n    all_sprites.add(mob_element)\r\n    mobs.add(mob_element)\r\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n\r\n        self.image = pygame.transform.scale(player_img, (50, 38))\r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.radius = 20\r\n        self.rect.centerx = WIDTH / 2\r\n        self.rect.bottom = HEIGHT - 10\r\n        self.speedx = 0\r\n        self.shield = 100\r\n        self.shoot_delay = 250\r\n        self.last_shot = pygame.time.get_ticks()\r\n        self.lives = 3\r\n        self.hidden = False\r\n        self.hide_timer = pygame.time.get_ticks()\r\n        self.power = 1\r\n        self.power_time = pygame.time.get_ticks()\r\n\r\n    def update(self):\r\n\r\n        if self.power >= 2 and pygame.time.get_ticks() - self.power_time > POWERUP_TIME:\r\n            self.power -= 1\r\n            self.power_time = pygame.time.get_ticks()\r\n\r\n        # unhide\r\n        if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1000:\r\n            self.hidden = False\r\n            self.rect.centerx = WIDTH / 2\r\n            self.rect.bottom = HEIGHT - 30\r\n\r\n        self.speedx = 0\r\n\r\n        # Key pressed\r\n        keystate = pygame.key.get_pressed()\r\n        if keystate[pygame.K_LEFT]:\r\n            self.speedx = -5\r\n        elif keystate[pygame.K_RIGHT]:\r\n            self.speedx = 5\r\n\r\n        # Shoot\r\n        if keystate[pygame.K_SPACE]:\r\n            self.shoot()\r\n\r\n        if self.rect.right > WIDTH:\r\n            self.rect.right = WIDTH\r\n        if self.rect.left < 0:\r\n            self.rect.left = 0\r\n\r\n        self.rect.x += self.speedx\r\n\r\n    def shoot(self):\r\n        # to tell the bullet where to spawn\r\n        now = pygame.time.get_ticks()\r\n        if now - self.last_shot > self.shoot_delay:\r\n            self.last_shot = now\r\n\r\n            # Single 
bullet\r\n if self.power == 1:\r\n bullet = Bullet(self.rect.centerx, self.rect.top)\r\n all_sprites.add(bullet)\r\n bullets.add(bullet)\r\n\r\n # Double bullets\r\n if self.power == 2:\r\n bullet1 = Bullet(self.rect.left, self.rect.centery)\r\n bullet2 = Bullet(self.rect.right, self.rect.centery)\r\n all_sprites.add(bullet1)\r\n all_sprites.add(bullet2)\r\n bullets.add(bullet1)\r\n bullets.add(bullet2)\r\n\r\n # Triple bullets\r\n if self.power >= 3:\r\n bullet1 = Bullet(self.rect.left, self.rect.centery)\r\n bullet2 = Bullet(self.rect.right, self.rect.centery)\r\n missile1 = Missile(self.rect.centerx, self.rect.top)\r\n all_sprites.add(bullet1)\r\n all_sprites.add(bullet2)\r\n all_sprites.add(missile1)\r\n bullets.add(bullet1)\r\n bullets.add(bullet2)\r\n bullets.add(missile1)\r\n\r\n # powerup\r\n def powerup(self):\r\n self.power += 1\r\n self.power_time = pygame.time.get_ticks()\r\n\r\n def hide(self):\r\n self.hidden = True\r\n self.hide_timer = pygame.time.get_ticks()\r\n self.rect.center = (WIDTH / 2, HEIGHT + 200)\r\n\r\n\r\n# Mobs class\r\nclass Mob(pygame.sprite.Sprite):\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image_orig = random.choice(meteor_images)\r\n self.image_orig.set_colorkey(BLACK)\r\n self.image = self.image_orig.copy()\r\n self.rect = self.image.get_rect()\r\n self.radius = int(self.rect.width * .90 / 2)\r\n self.rect.x = random.randrange(0, WIDTH - self.rect.width)\r\n self.rect.y = random.randrange(-150, -100)\r\n self.speedy = random.randrange(5, 10)\r\n self.speedx = random.randrange(-3, 3)\r\n\r\n # Rotation\r\n self.rotation = 0\r\n self.rotation_speed = random.randrange(-8, 8)\r\n self.last_update = pygame.time.get_ticks()\r\n\r\n # Rotate the mob elements\r\n\r\n def rotate(self):\r\n time_now = pygame.time.get_ticks()\r\n if time_now - self.last_update > 50: # milliseconds\r\n self.last_update = time_now\r\n self.rotation = (self.rotation + self.rotation_speed) % 360\r\n new_image = pygame.transform.rotate(self.image_orig, self.rotation)\r\n old_center = self.rect.center\r\n self.image = new_image\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n\r\n def update(self):\r\n self.rotate()\r\n self.rect.x += self.speedx\r\n self.rect.y += self.speedy\r\n\r\n if (self.rect.top > HEIGHT + 10) or (self.rect.left < -25) or (self.rect.right > WIDTH + 20):\r\n self.rect.x = random.randrange(0, WIDTH - self.rect.width)\r\n self.rect.y = random.randrange(-100, -40)\r\n self.speedy = random.randrange(1, 8)\r\n\r\n# Power up\r\n\r\n\r\nclass Pow(pygame.sprite.Sprite):\r\n def __init__(self, center):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.type = random.choice(['shield', 'gun'])\r\n self.image = powerup_images[self.type]\r\n self.image.set_colorkey(BLACK)\r\n self.rect = self.image.get_rect()\r\n self.rect.center = center\r\n self.speedy = 2\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n if self.rect.top > HEIGHT:\r\n self.kill()\r\n\r\n\r\n# Bullets class\r\nclass Bullet(pygame.sprite.Sprite):\r\n def __init__(self, x, y):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = bullet_img\r\n self.image.set_colorkey(BLACK)\r\n self.rect = self.image.get_rect()\r\n self.rect.bottom = y\r\n self.rect.centerx = x\r\n self.speedy = -10\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\n\r\n# Missile class\r\nclass Missile(pygame.sprite.Sprite):\r\n def __init__(self, x, y):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = 
missile_img\r\n self.image.set_colorkey(BLACK)\r\n self.rect = self.image.get_rect()\r\n self.rect.bottom = y\r\n self.rect.centerx = x\r\n self.speedy = -10\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\n\r\n# Background image\r\nbackground = pygame.image.load(path.join(img_dir, 'universe.jpeg')).convert()\r\nbackground_rect = background.get_rect()\r\n\r\n# Player images\r\nplayer_img = pygame.image.load(\r\n path.join(img_dir, 'nave-espacial.png')).convert()\r\nplayer_mini_img = pygame.transform.scale(player_img, (25, 19))\r\nplayer_mini_img.set_colorkey(BLACK)\r\n\r\n# Bullets images\r\nbullet_img = pygame.image.load(path.join(img_dir, 'misil.png')).convert()\r\nmissile_img = pygame.image.load(\r\n path.join(img_dir, 'misil.png')).convert_alpha()\r\nmeteor_img = pygame.image.load(\r\n path.join(img_dir, 'meteoro3.png')).convert()\r\nmeteor_images = []\r\nmeteor_list = [\r\n 'meteoro1.png',\r\n 'meteoro2.png',\r\n 'meteoro3.png',\r\n 'meteoro4.png',\r\n 'meteoro5.png',\r\n 'meteoro6.png',\r\n 'meteoro7.png'\r\n]\r\n\r\nfor image in meteor_list:\r\n meteor_images.append(pygame.image.load(\r\n path.join(img_dir, image)).convert())\r\n\r\n# load power ups\r\npowerup_images = {}\r\npowerup_images['shield'] = pygame.image.load(\r\n path.join(img_dir, 'escudo.png')).convert()\r\npowerup_images['gun'] = pygame.image.load(\r\n path.join(img_dir, 'buff.png')).convert()\r\n\r\n\r\n# Game loop\r\nrunning = True\r\nmenu_display = True\r\nlvl2 = False\r\nlvl3 = False\r\n\r\nwhile running:\r\n if menu_display:\r\n main_menu()\r\n pygame.time.wait(1000)\r\n\r\n menu_display = False\r\n\r\n # Group all sprites\r\n all_sprites = pygame.sprite.Group()\r\n player = Player()\r\n all_sprites.add(player)\r\n\r\n # Spawn mobs\r\n mobs = pygame.sprite.Group()\r\n for i in range(3):\r\n newmob()\r\n\r\n # Group for bullets\r\n bullets = pygame.sprite.Group()\r\n powerups = pygame.sprite.Group()\r\n\r\n # Score board variable\r\n score = 0\r\n\r\n elif lvl2:\r\n lvl2_menu()\r\n pygame.time.wait(1000)\r\n\r\n lvl2 = False\r\n\r\n # Group all sprites\r\n all_sprites = pygame.sprite.Group()\r\n player = Player()\r\n all_sprites.add(player)\r\n\r\n # Spawn mobs\r\n mobs = pygame.sprite.Group()\r\n for i in range(8):\r\n newmob()\r\n\r\n # Group for bullets\r\n bullets = pygame.sprite.Group()\r\n powerups = pygame.sprite.Group()\r\n\r\n # Score board variable\r\n score = 350\r\n\r\n elif lvl3:\r\n lvl3_menu()\r\n pygame.time.wait(1000)\r\n\r\n lvl3 = False\r\n\r\n # Group all sprites\r\n all_sprites = pygame.sprite.Group()\r\n player = Player()\r\n all_sprites.add(player)\r\n\r\n # Spawn mobs\r\n mobs = pygame.sprite.Group()\r\n for i in range(12):\r\n newmob()\r\n\r\n # Group for bullets\r\n bullets = pygame.sprite.Group()\r\n powerups = pygame.sprite.Group()\r\n\r\n # Score board variable\r\n score = 600\r\n\r\n # 1 Process input/events\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n # Press ESC to exit game\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n running = False\r\n\r\n # event for shooting the bullets\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n player.shoot()\r\n\r\n all_sprites.update()\r\n\r\n # Bullet collision\r\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True)\r\n for hit in hits:\r\n score += 10\r\n\r\n # Spawn new powerup\r\n if random.random() > 0.9:\r\n pow = Pow(hit.rect.center)\r\n 
all_sprites.add(pow)\r\n powerups.add(pow)\r\n\r\n # here we can going to lvl 2\r\n if score == 350:\r\n # menu_display = True\r\n win_menu()\r\n lvl2 = True\r\n if score == 600:\r\n win_menu()\r\n lvl3 = True\r\n if score == 1000:\r\n won_menu()\r\n pygame.time.wait(4000)\r\n running = False\r\n newmob()\r\n\r\n # Player collision\r\n hits = pygame.sprite.spritecollide(\r\n player, mobs, True, pygame.sprite.collide_circle)\r\n for hit in hits:\r\n player.shield -= hit.radius * 2\r\n newmob()\r\n if player.shield <= 0:\r\n # running = False\r\n player.hide()\r\n player.lives -= 1\r\n player.shield = 100\r\n\r\n # if the player hit a power up\r\n hits = pygame.sprite.spritecollide(player, powerups, True)\r\n for hit in hits:\r\n if hit.type == 'shield':\r\n player.shield += random.randrange(10, 30)\r\n if player.shield >= 100:\r\n player.shield = 100\r\n if hit.type == 'gun':\r\n player.powerup()\r\n\r\n # if player died and the explosion has finished, end game\r\n if player.lives == 0:\r\n running = False\r\n\r\n # 3 Draw/render\r\n screen.fill(BLACK)\r\n # draw the stargaze.png image\r\n screen.blit(background, background_rect)\r\n\r\n all_sprites.draw(screen)\r\n # 10px down from the screen\r\n draw_text(screen, str(score), 18, WIDTH / 2, 10)\r\n draw_shield_bar(screen, 5, 5, player.shield)\r\n\r\n # Draw lives\r\n draw_lives(screen, WIDTH - 100, 5, player.lives, player_mini_img)\r\n\r\n # Done after drawing everything to the screen\r\n pygame.display.flip()\r\n\r\npygame.quit()\r\n","repo_name":"Guapura89/videogame","sub_path":"juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":14892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20763483714","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\n\ntips = sns.load_dataset('tips')\n\n# Show a linear reg line\n# Add markers for division icons on hue\n# Scatter kws allows changing of marker size\nsns.lmplot(x='total_bill', y='tip', data=tips, hue='sex',\n markers=['o', 'v'], scatter_kws={'s': 100})\n\nplt.show()\n\n# Vary plot by column value, and row value\n# Similar to grids\nsns.lmplot(x='total_bill', y='tip', data=tips, col='sex', row='time')\n\nplt.show()\n\n# Add a little more variation based on hue\n# Change aspect ratio of gridded plots\nsns.lmplot(x='total_bill', y='tip', data=tips,\n col='day', hue='sex', aspect=0.6, height=8)\n\nplt.show()\n","repo_name":"Hereiam123/Python-Data-Excercises","sub_path":"Data Visualization with Seaborn/Seaborn Regression.py","file_name":"Seaborn Regression.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25934128913","text":"def addition(num1, num2):\n answer = num1 + num2\n print(answer)\n\n\ndef subtraction(num1, num2):\n answer = num1 - num2\n print(answer)\n# form times no times\n\n\ndef times(num1, num2):\n answer = 0\n\n while num2 > 0:\n if num2 % 2 == 1:\n answer += num1\n\n num1 *= 2\n num2 //= 2\n print(answer)\n\ndef division(num1, num2):\n answer = num1 // num2\n print(answer)\n\n\n# add menu\n\nchoice = input(\"add divide subtract multiply: \")\n\nnum1 = int(input(\"enter the first number: \"))\nnum2 = int(input(\"enter the second number: \"))\n\n\n\n#basics\nif choice == \"add\":\n addition(num1, num2)\n\nelif choice == \"subtract\":\n subtraction(num1, num2)\n\nelif choice == \"multiply\":\n # form times no times\n times(num1, num2)\n\n\nelse:\n division(num1, 
num2)\n\n\n","repo_name":"K-972/big-schoolwork-repo","sub_path":"sixth form/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74489041972","text":"class Stats:\n def __init__(self, *args):\n self._args = args\n def mean(self):\n length = len(self._args)\n summation = sum(self._args)\n return summation / length\n def median(self):\n length = len(self._args)\n sorted_args = sorted(self._args)\n if length % 2 == 1:\n median_index = length // 2\n return sorted_args[median_index]\n else:\n middle_two_0 = length // 2 - 1\n middle_two_1 = length // 2\n middle_two_mean = (sorted_args[middle_two_0] + sorted_args[middle_two_1]) / 2\n return middle_two_mean\n def modes(self):\n # find frequencies\n counter_dict = dict()\n for arg in self._args:\n if arg in counter_dict.keys():\n counter_dict[arg] += 1\n else:\n counter_dict[arg] = 1\n # find max frequency\n max_freq = max(counter_dict.values())\n # find mode\n modes = [k for k, v in counter_dict.items() if v == max_freq]\n return modes","repo_name":"yaojenkuo/introduction-to-python","sub_path":"suggested_answers/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13007492617","text":"from datetime import datetime, timedelta\nfrom enum import Enum\nfrom typing import Tuple\n\nimport cv2\n\n__all__ = ['Color', 'draw_text', 'draw_rectangle', 'TemporaryText']\n\n\nclass Color(Enum):\n RED = (0, 0, 255)\n GREEN = (0, 255, 0)\n BLUE = (255, 0, 0)\n\n\ndef draw_text(img,\n text: str,\n coords: Tuple[int, int] = (100, 100),\n font=cv2.FONT_HERSHEY_PLAIN,\n font_size: float = 3,\n color: Color = Color.GREEN,\n thickness: int = 2):\n cv2.putText(img,\n text, coords,\n font, font_size,\n color.value, thickness)\n\n\ndef draw_rectangle(img,\n left_top_coords: Tuple[int, int],\n right_bottom_coords: Tuple[int, int],\n color: Color = Color.GREEN,\n thickness: int = 2):\n cv2.rectangle(img, left_top_coords, right_bottom_coords, color.value, thickness)\n\n\ndef draw_point(img,\n coords: Tuple[int, int],\n radius: int = 2,\n color: Color = Color.GREEN,\n thickness: int = 2):\n cv2.circle(img, coords, radius, color.value, thickness)\n\n\nclass TemporaryText(object):\n def __init__(self, text: str, duration_in_seconds: float = 1.0, color: Color = Color.BLUE):\n self.text = text\n self.color = color\n\n self.duration_in_seconds = duration_in_seconds\n self.init_time = datetime.now()\n\n def has_expired(self) -> bool:\n return datetime.now() > self.expiration_time\n\n @property\n def expiration_time(self):\n return timedelta(seconds=self.duration_in_seconds) + self.init_time\n\n def draw(self, img):\n draw_text(img, self.text, color=self.color)","repo_name":"Midorina/CursorControlWithGestures","sub_path":"utils/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71921847734","text":"# This can be deleted in the final implementation when no more testing is necessary.\nimport cv2\nimport numpy as np\nimport BoundaryTracing\nimport ImageProcessingMethods\n\n\ndef morphOp(input):\n # Close and open to remove noise and holes in contours.\n kernel = np.ones((17, 17), np.uint8)\n kernel2 = np.ones((3, 3), np.uint8)\n closing = cv2.morphologyEx(input, cv2.MORPH_CLOSE, 
kernel)\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel2)\n\n #closing = ImageProcessingMethods.closing(input)\n #opening = ImageProcessingMethods.opening(closing)\n\n return opening\n\n\n# find contours and then return the corners of a rotated bounding rectangle.\ndef box_from_contours(input_mask):\n temp_box = []\n\n # Boundary tracing\n im2, contours, hierarchy = cv2.findContours(input_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n #contours = BoundaryTracing.boundaryTracing(input_mask)\n contours_f = contours\n\n # Convert to numpy array\n contours_f = np.array(contours_f)\n for i in range(len(contours_f)):\n contours_f[i] = np.array(contours_f[i])\n\n # # Comment our while using our boundary tracing method\n # contours_f = contours\n\n # Find 4 points from a contour\n for i in range(len(contours_f)):\n cnt = contours_f[i]\n rect = cv2.minAreaRect(cnt)\n rectArea = rect[1][0]*rect[1][1]\n # contourArea = cv2.contourArea(cnt)\n # relationship_cr = contourArea / rectArea\n\n # Checks if the area of the rectangle meets a minimum\n if rectArea > 100:\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n temp_box.append(box)\n\n return temp_box\n\n\ndef detectionRed(clean_frame):\n # Red\n lower_redH = np.array([0])\n upper_redH = np.array([5])\n lower_redS = np.array([120])\n upper_redS = np.array([255])\n lower_redV = np.array([150])\n upper_redV = np.array([255])\n #mask_red = ImageProcessingMethods.threshold(clean_frame, lower_redH, upper_redH, lower_redS, upper_redS, lower_redV, upper_redV)\n\n lower_red = np.array([0, 120, 150])\n upper_red = np.array([8, 255, 255])\n mask_red = cv2.inRange(clean_frame, lower_red, upper_red)\n\n # Morphological operations\n processed = morphOp(mask_red)\n\n cv2.imshow('thresh_red', processed)\n # Find contours\n box = box_from_contours(processed)\n\n return box\n\n\ndef detectionBlue(clean_frame):\n # Blue\n lower_blueH = np.array([100])\n upper_blueH = np.array([120])\n lower_blueS = np.array([90])\n upper_blueS = np.array([255])\n lower_blueV = np.array([90])\n upper_blueV = np.array([255])\n #mask_blue = ImageProcessingMethods.threshold(clean_frame, lower_blueH, upper_blueH, lower_blueS, upper_blueS, lower_blueV, upper_blueV)\n\n lower_blue = np.array([100, 100, 100])\n upper_blue = np.array([120, 255, 255])\n mask_blue = cv2.inRange(clean_frame, lower_blue, upper_blue)\n\n # Morphological operations\n processed = morphOp(mask_blue)\n\n # Find contours\n box = box_from_contours(processed)\n\n return box\n","repo_name":"CeeGeeArt/P3","sub_path":"Detection.py","file_name":"Detection.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"291002","text":"class MovieShow:\n MIN = 3\n NOT_AVAILABLE = (-1, -1)\n\n def __init__(self, title, showtime, auditorium, rows, seats_in_row):\n self.__movie = title\n self.__time = showtime\n self.__theater = auditorium\n self.__seats = [[False for _ in range(max(3, seats_in_row))]\n for _ in range(max(3, rows))]\n\n def get_movie(self):\n return self.__movie\n\n def get_time(self):\n return self.__time\n\n def get_theater(self):\n return self.__theater\n\n def reserve_seat(self, row, seat):\n\n if 1 <= row <= len(self.__seats) \\\n and 1 <= seat <= len(self.__seats[0]):\n pass\n else:\n return False\n\n row_idx = row - 1\n seat_idx = seat - 1\n\n if self.__seats[row_idx][seat_idx]:\n return False\n else:\n self.__seats[row_idx][seat_idx] = True\n return True\n\n def reserve_seats(self, row, first, 
last):\n error = False\n booked_list = []\n for i in range(first, last+1):\n booked = self.reserve_seat(row, i)\n if not booked:\n error = True\n break\n else:\n booked_list.append((row, i))\n if error is False:\n return True\n else:\n for row, seat in booked_list:\n try:\n row_idx = row - 1\n seat_idx = seat - 1\n self.__seats[row_idx][seat_idx] = False\n except IndexError:\n pass\n return False\n\n def find_available_seats(self, number):\n\n for i in range(len(self.__seats)-1, -1, -1):\n n = 0\n start = -1\n for j in range(len(self.__seats[i])):\n if self.__seats[i][j] is False:\n if n == 0:\n start = j\n n += 1\n if n == number:\n row = i + 1\n seat = start + 1\n return row, seat\n else:\n n = 0\n return MovieShow.NOT_AVAILABLE\n\n def get_reservation_map(self):\n info = \"\"\n for i in range(len(self.__seats)-1, -1, -1):\n row = i + 1\n info_row = f\"{row: >02d}:\"\n for j in range(len(self.__seats[i])):\n if self.__seats[i][j] is False:\n info_row += \"-\"\n else:\n info_row += \"X\"\n\n if info != \"\":\n info += \"\\n\"\n info += info_row\n return info\n\n def seats_total(self):\n return len(self.__seats) * len(self.__seats[0])\n\n def total_reserved(self):\n n = 0\n for r in self.__seats:\n for s in r:\n n += int(s)\n return n\n\n def __str__(self):\n return f\"{self.__time} {self.__movie} {self.__theater} \" \\\n f\"reserved {self.total_reserved()}/{self.seats_total()}\"\n","repo_name":"KazuichiroTaira/CS_A1111","sub_path":"Round9/movie_show.py","file_name":"movie_show.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8498659876","text":"from __future__ import print_function\n\nfrom builtins import range, str\nfrom inspect import currentframe\n\n#from PyQt5.QtGui import QContextMenuEvent\n\n__license__ = \"CeCILL v2\"\n__revision__ = \" $Id$ \"\n\n\nimport traceback\nfrom os.path import join as pj\n\n#from openalea import misc\n\nfrom qtpy import QtCore, QtGui, QtWidgets, QtSvg\nfrom openalea.visualea.qt.designer import generate_pyfile_from_uifile, get_data\n\nfrom openalea.core import cli, logger\nfrom openalea.core.algo.dataflow_evaluation import AbstractEvaluation\nfrom openalea.core.compositenode import CompositeNodeFactory\nfrom openalea.core.node import NodeFactory\nfrom openalea.core.pkgmanager import PackageManager\nfrom openalea.core.service.ipython import interpreter as get_interpreter\nfrom openalea.core.settings import NoOptionError, NoSectionError, Settings\n\nfrom openalea.oalab.shell import get_shell_class\n\nsrc = get_data(\"openalea.visualea.mainwindow\", \"resources\") / 'mainwindow.ui'\ndest = get_data(\"openalea.visualea.mainwindow\", \"ui_mainwindow.py\")\ngenerate_pyfile_from_uifile(__name__, src=src, dest=dest)\n\nfrom openalea.visualea import dataflowview, helpwidget, metainfo, ui_mainwindow\nfrom openalea.visualea.dialogs import (NewData, NewGraph, NewPackage,\n PreferencesDialog)\nfrom openalea.visualea.graph_operator import GraphOperator\nfrom openalea.visualea.graph_operator.vertex import VertexOperators\nfrom openalea.visualea.logger import LoggerView\nfrom openalea.visualea.node_treeview import (CategoryModel, DataPoolListView,\n DataPoolModel,\n NodeFactoryTreeView,\n NodeFactoryView, PkgModel,\n SearchListView, SearchModel)\nfrom openalea.visualea.node_widget import SignalSlotListener\n\nPROVENANCE = False\n\n\nclass MainWindow(QtWidgets.QMainWindow,\n ui_mainwindow.Ui_MainWindow,\n SignalSlotListener):\n\n def __init__(self, session, 
parent=None):\n \"\"\"\n @param session : user session\n @param parent : parent window\n \"\"\"\n QtWidgets.QMainWindow.__init__(self, parent)\n SignalSlotListener.__init__(self)\n ui_mainwindow.Ui_MainWindow.__init__(self)\n self.setupUi(self)\n self.setAcceptDrops(True)\n self.setAttribute(QtCore.Qt.WA_QuitOnClose)\n\n self.tabWorkspace.removeTab(0)\n self.tabWorkspace.setTabsClosable(True)\n self.ws_cpt = 0\n\n if hasattr(AbstractEvaluation, \"__provenance__\"):\n self._prov = AbstractEvaluation.__provenance__\n else:\n self._prov = False\n\n #last opened nodes\n self._last_opened = []\n\n #lower tab pane : python shell, logger...\n self.lowerpane = QtWidgets.QTabWidget()\n self.splitter.addWidget(self.lowerpane)\n\n # python interpreter\n #try:\n interpreter = get_interpreter()\n interpreter=None\n #except NameError:\n # InterpreterClass = get_interpreter_class()\n # interpreter = InterpreterClass()\n\n # interpreter init defered after session init\n shellclass = get_shell_class()\n self.interpreterWidget = shellclass(interpreter,\n cli.get_welcome_msg())\n interpreter = self.interpreterWidget.interpreter\n\n GraphOperator.globalInterpreter = interpreter\n self.lowerpane.addTab(self.interpreterWidget, \"Python Shell\")\n\n if logger.QT_LOGGING_MODEL_AVAILABLE:\n # openalea logger\n model = logger.LoggerOffice().get_handler(\"qt\")\n view = LoggerView(parent=self.lowerpane, model=model)\n self.lowerpane.addTab(view, \"Logging\")\n\n # search list view\n self.search_model = SearchModel()\n self.searchListView = \\\n SearchListView(self, self.searchview)\n self.searchListView.setModel(self.search_model)\n self.vboxlayout3.addWidget(self.searchListView)\n self.searchListView.clicked.connect(self.on_package_manager_focus_change)\n\n # help widget\n self.helpWidget = helpwidget.HelpWidget()\n # TODO: Update data from css\n '''\n css = pj(misc.__path__[0], \"..\", \"..\", \"..\",\n \"share\", \"_static\", \"openalea.css\")\n self.helpWidget.set_stylesheet_file(css)\n '''\n self.poolTabWidget.addTab(self.helpWidget, \"Help\")\n\n # Widgets\n\n # The fix didn't work for some reason so I kept the old buggy one (l.141/142)\n # self.tabWorkspace.contextMenuEvent.connect(self.contextMenuEvent)\n # self.connect(self.tabWorkspace, QtCore(\"contextMenuEvent(QContextMenuEvent)\"),\n # self.contextMenuEvent) # F. 
Bauget 2023-01-18\n self.tabWorkspace.customContextMenuRequested.connect(self.contextMenuEvent)\n self.tabWorkspace.currentChanged.connect(self.ws_changed)\n self.search_lineEdit.editingFinished.connect(self.search_node)\n self.tabWorkspace.tabCloseRequested.connect(self.close_tab_workspace)\n\n # Help Menu\n self.action_About.triggered.connect(self.about)\n self.actionOpenAlea_Web.triggered.connect(self.web)\n self.action_Help.triggered.connect(self.help)\n\n # File Menu\n self.action_New_Session.triggered.connect(self.new_session)\n self.action_Open_Session.triggered.connect(self.open_session)\n self.action_Save_Session.triggered.connect(self.save_session)\n self.actionSave_as.triggered.connect(self.save_as)\n self.action_Quit.triggered.connect(self.quit)\n\n self.action_Image.triggered.connect(self.export_image)\n self.action_Svg.triggered.connect(self.export_image_svg)\n\n # Package Manager Menu\n self.action_Auto_Search.triggered.connect(self.reload_all)\n self.action_Add_File.triggered.connect(self.add_pkgdir)\n self.actionFind_Node.triggered.connect(self.find_node)\n self.action_New_Network.triggered.connect(self.new_graph)\n self.actionNew_Python_Node.triggered.connect(self.new_python_node)\n self.actionNew_Package.triggered.connect(self.new_package)\n self.action_Data_File.triggered.connect(self.new_data)\n\n # DataPool Menu\n self.actionClear_Data_Pool.triggered.connect(self.clear_data_pool)\n\n # Python Menu\n self.action_Execute_script.triggered.connect(\n self.exec_python_script)\n self.actionOpen_Console.triggered.connect(\n self.open_python_console)\n self.actionClea_r_Console.triggered.connect(\n self.clear_python_console)\n\n # WorkspaceMenu\n self.__operatorAction = dict([(self.action_Run, \"graph_run\"),\n (self.actionInvalidate, \"graph_invalidate\"),\n (self.actionReset, \"graph_reset\"),\n (self.actionConfigure_I_O, \"graph_configure_io\"),\n (self.actionGroup_Selection, \"graph_group_selection\"),\n (self.action_Copy, \"graph_copy\"),\n (self.action_Paste, \"graph_paste\"),\n (self.action_Cut, \"graph_cut\"),\n (self.action_Delete_2, \"graph_remove_selection\"),\n (self.action_Close_current_workspace, \"graph_close\"),\n (self.action_Export_to_Factory, \"graph_export_to_factory\"),\n (self.actionReload_from_Model, \"graph_reload_from_factory\"),\n (self.actionExport_to_Application, \"graph_export_application\"),\n (self.actionPreview_Application, \"graph_preview_application\"),\n (self.actionAlignHorizontally, \"graph_align_selection_horizontal\"),\n (self.actionAlignLeft, \"graph_align_selection_left\"),\n (self.actionAlignRight, \"graph_align_selection_right\"),\n (self.actionAlignMean, \"graph_align_selection_mean\"),\n (self.actionDistributeHorizontally, \"graph_distribute_selection_horizontally\"),\n (self.actionDistributeVertically, \"graph_distribute_selection_vertically\"),\n (self.actionSetCustomColor, \"graph_set_selection_color\"),\n (self.actionUseCustomColor, \"graph_use_user_color\")])\n\n self._last_open_action_group = QtWidgets.QActionGroup(self)\n self._last_open_action_group.triggered.connect(\n self.reopen_last)\n self.action_New_Empty_Workspace.triggered.connect(self.new_workspace)\n self.menu_Workspace.aboutToShow.connect(self.__wsMenuShow)\n self.menu_Workspace.aboutToShow.connect(self.__wsMenuHide)\n for ac, fname in list(self.__operatorAction.items()):\n f = self.__make_operator_action_connector(ac, fname)\n ac.triggered.connect(f)\n\n self.actionTo_script.triggered.connect(self.to_python_script)\n\n # Window Mneu\n 
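        # The three Window-menu connections below open the preferences dialog
        # and toggle the left (package manager) and right (workspace) panels.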
self.actionPreferences.triggered.connect(self.open_preferences)\n self.actionDisplay_Package_Manager.toggled.connect(self.display_leftpanel)\n self.actionDisplay_Workspaces.toggled.connect(self.display_rightpanel)\n\n #load personnal GUI settings\n self.read_settings()\n\n #############\n # Provenance\n #############\n if PROVENANCE:\n self.menu_provenance = QtWidgets.QMenu(self.menubar)\n self.menu_provenance.setObjectName(\"menu_provenance\")\n self.menu_provenance.setTitle(QtWidgets.QApplication.translate(\"MainWindow\", \"&Provenance\", None, QtWidgets.QApplication.UnicodeUTF8))\n\n self.action_activ_prov = QtWidgets.QAction(self)\n self.action_activ_prov.setCheckable(True)\n prov = self.get_provenance()\n self.action_activ_prov.setChecked(prov)\n self.action_activ_prov.setObjectName(\"action_activ_prov\")\n self.action_activ_prov.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Connect/Disconnect Provenance\", None, QtWidgets.QApplication.UnicodeUTF8))\n\n self.action_show_prov = QtWidgets.QAction(self)\n self.action_show_prov.setCheckable(False)\n self.action_show_prov.setObjectName(\"action_show_prov\")\n self.action_show_prov.setText(QtWidgets.QApplication.translate(\"MainWindow\", \"Show Provenance\", None, QtWidgets.QApplication.UnicodeUTF8))\n\n self.menu_provenance.addAction(self.action_activ_prov)\n self.menu_provenance.addAction(self.action_show_prov)\n\n self.menubar.addAction(self.menu_provenance.menuAction())\n\n self.action_activ_prov.toggled.connect(self.set_provenance)\n self.action_show_prov.triggered.connect(self.show_provenance)\n\n def set_provenance(self, provenance):\n \"\"\"\n Set/Unset the registry of provenance\n\n :param provenance: boolean which is set to True if we want to register provenance. Else, False.\n \"\"\"\n if hasattr(AbstractEvaluation, \"__provenance__\"):\n self._prov = provenance\n AbstractEvaluation.__provenance__ = self._prov\n\n def get_provenance(self):\n \"\"\"\n :return: boolean which is set to True if we want to register provenance. 
Else, False.\n \"\"\"\n return self._prov\n\n def show_provenance(self):\n \"\"\"\n Display the provenance\n \"\"\"\n from openalea.visualea.provenance import (ModalDialog,\n ProvenanceSelectorWidget,\n search_trace)\n prov_widget = ProvenanceSelectorWidget(self)\n dialog = ModalDialog(prov_widget)\n dialog.show()\n dialog.raise_()\n\n if dialog.exec_():\n cn = prov_widget.c_n.text()\n pkg = prov_widget.pkg.text()\n wk = prov_widget.workspace.text()\n\n search_trace(cn, pkg, wk, parent=self)\n\n def on_session_started(self, session):\n self.initialise(session)\n self.session = session\n\n # -- configure the interpreter --\n cli.init_interpreter(self.interpreterWidget.interpreter,\n session,\n {\"tabs\": self.tabWorkspace})\n\n # -- now, many package manager related views --\n self.pkgmanager = session.pkgmanager\n self.actionShow_log.triggered.connect(self.pkgmanager.log.print_log)\n\n # package tree view\n self.pkg_model = PkgModel(self.pkgmanager)\n self.packageTreeView = \\\n NodeFactoryTreeView(self, self.packageview)\n self.packageTreeView.setModel(self.pkg_model)\n self.vboxlayout1.addWidget(self.packageTreeView)\n self.packageTreeView.clicked.connect(self.on_package_manager_focus_change)\n\n # category tree view\n self.cat_model = CategoryModel(self.pkgmanager)\n self.categoryTreeView = \\\n NodeFactoryTreeView(self, self.categoryview)\n self.categoryTreeView.setModel(self.cat_model)\n self.vboxlayout2.addWidget(self.categoryTreeView)\n self.categoryTreeView.clicked.connect(self.on_package_manager_focus_change)\n\n # data pool list view\n self.datapool_model = DataPoolModel(session.datapool)\n self.datapoolListView = \\\n DataPoolListView(self, session.datapool, self.pooltab)\n self.datapoolListView.setModel(self.datapool_model)\n self.vboxlayout4.addWidget(self.datapoolListView)\n\n self.session.simulate_workspace_addition()\n\n def debug(self):\n v = self.packageTreeView\n # fix_print_with_import\n print((\"items\", v.expanded_items))\n # fix_print_with_import\n print((\"model\", v.model()))\n # fix_print_with_import\n print((\"map\", v.model().index_map))\n\n def write_settings(self):\n \"\"\"Save application settings.\n \"\"\"\n settings = Settings()\n\n #main window\n settings.set(\"MainWindow\", \"size\", \"(%d,%d)\" % (self.width(), self.height()))\n settings.set(\"MainWindow\", \"pos\", \"(%d,%d)\" % (self.x(), self.y()))\n\n sizes = \"[%s]\" % \",\".join(\"%d\" % val for val in self.splitter_2.sizes())\n settings.set(\"MainWindow\", \"splitter_2\", sizes)\n sizes = \"[%s]\" % \",\".join(\"%d\" % val for val in self.splitter_3.sizes())\n settings.set(\"MainWindow\", \"splitter_3\", sizes)\n\n #tree view\n settings.set(\"TreeView\", \"open\", \"[]\")\n\n #workspace\n last_open = \"[%s]\" % \",\".join(\"'%s'\" % item for item in self._last_opened)\n settings.set(\"WorkSpace\", \"last\", last_open)\n\n #provenance\n prov = self.get_provenance()\n settings.set(\"Provenance\", \"enable\", str(prov))\n\n settings.write()\n\n def read_settings(self):\n \"\"\"Read application settings.\n \"\"\"\n settings = Settings()\n\n #main window\n try:\n size = eval(settings.get(\"MainWindow\", \"size\"))\n self.resize(QtCore.QSize(*size))\n except NoSectionError:\n pass\n except NoOptionError:\n pass\n try:\n pos = eval(settings.get(\"MainWindow\", \"pos\"))\n self.move(QtCore.QPoint(*pos))\n except NoSectionError:\n pass\n except NoOptionError:\n pass\n try:\n sizes = eval(settings.get(\"MainWindow\", \"splitter_2\"))\n self.splitter_2.setSizes(sizes)\n except NoSectionError:\n pass\n except 
NoOptionError:\n pass\n try:\n sizes = eval(settings.get(\"MainWindow\", \"splitter_3\"))\n self.splitter_3.setSizes(sizes)\n except NoSectionError:\n pass\n except NoOptionError:\n pass\n #workspace\n try:\n last_open = eval(settings.get(\"WorkSpace\", \"last\"))\n last_open.reverse()\n for item in last_open:\n gr = item.split(\".\")\n pkgid = \".\".join(gr[:-1])\n name = gr[-1]\n self.add_last_open(pkgid, name)\n except NoSectionError:\n pass\n except NoOptionError:\n pass\n\n try:\n prov = eval(settings.get(\"Provenance\", \"enable\"))\n self.set_provenance(bool(prov))\n except NoSectionError:\n pass\n except NoOptionError:\n pass\n\n def redo_last_open_menu(self):\n \"\"\"Create entries for last opened nodes.\n \"\"\"\n self.menuLast_open.clear()\n for action in self._last_open_action_group.actions():\n self._last_open_action_group.removeAction(action)\n\n for i, node_descr in enumerate(self._last_opened):\n action = self.menuLast_open.addAction(node_descr)\n action.setShortcut(\"Ctrl+%d\" % (i + 1))\n self._last_open_action_group.addAction(action)\n\n self.menuLast_open.setEnabled(len(self._last_opened) > 0)\n\n def reopen_last(self, action):\n \"\"\"Reopen a last open node.\n \"\"\"\n gr = str(action.text()).split(\".\")\n pkgid = \".\".join(gr[:-1])\n name = gr[-1]\n manager = PackageManager()\n factory = manager[pkgid][name]\n self.open_compositenode(factory)\n\n def add_last_open(self, pkgid, factory_name):\n \"\"\"Register a new lest opened node.\n \"\"\"\n key = \".\".join([pkgid, factory_name])\n try:\n self._last_opened.remove(key)\n except ValueError:\n pass\n\n self._last_opened.insert(0, key)\n if len(self._last_opened) > 4:\n del self._last_opened[-1]\n\n self.redo_last_open_menu()\n\n def __wsMenuShow(self, abool=False):\n graphview = self.tabWorkspace.currentWidget()\n if not isinstance(graphview, dataflowview.DataflowView):\n return\n\n items = graphview.scene().get_selected_items(dataflowview.vertex.GraphicalVertex)\n self.actionUseCustomColor.setChecked(False)\n for i in items:\n if i.vertex().get_ad_hoc_dict().get_metadata(\"useUserColor\"):\n self.actionUseCustomColor.setChecked(True)\n break\n\n def __make_operator_action_connector(self, action, name):\n def connector(aBool=None):\n graphview = self.tabWorkspace.currentWidget()\n if not isinstance(graphview, dataflowview.DataflowView):\n return\n\n # daniel was here: now the menu is built using the graph operator.\n operator = GraphOperator(graph=graphview.scene().get_graph(),\n graphScene=graphview.scene(),\n clipboard=self.session.clipboard,\n siblings=self.session.workspaces)\n operator.register_listener(self)\n operator(fName=name)()\n\n return connector\n\n def __wsMenuHide(self):\n pass\n\n def open_compositenode(self, factory):\n \"\"\" open a composite node editor \"\"\"\n node = factory.instantiate()\n\n self.session.add_workspace(node, notify=False)\n self.open_widget_tab(node, factory=factory)\n\n self.add_last_open(factory.__pkg_id__, factory.name)\n\n def about(self):\n \"\"\" Display About Dialog \"\"\"\n\n mess = QtWidgets.QMessageBox.about(self, \"About Visualea\",\n \"Version %s\\n\\n\" % (metainfo.get_version()) +\n \"VisuAlea is part of the OpenAlea framework.\\n\" +\n metainfo.get_copyright() +\n \"This Software is distributed under the Cecill-V2 License.\\n\\n\" +\n \"Visit \" + metainfo.url + \"\\n\\n\"\n )\n\n def help(self):\n \"\"\" Display help \"\"\"\n self.web()\n\n def web(self):\n \"\"\" Open OpenAlea website \"\"\"\n QtGui.QDesktopServices.openUrl(QtCore.QUrl(metainfo.url))\n\n def 
quit(self):\n \"\"\" Quit Application \"\"\"\n if(QtWidgets.QMessageBox.question(self, \"Quit?\", \"Are you sure you want to quit?\",\n QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel) ==\n QtWidgets.QMessageBox.Ok):\n QtWidgets.QApplication.exit(0)\n\n def notify(self, sender, event):\n \"\"\" Notification from observed \"\"\"\n if event and isinstance(sender, GraphOperator):\n index = -1\n for i in range(self.tabWorkspace.count()):\n wid = self.tabWorkspace.widget(i)\n if isinstance(wid, dataflowview.DataflowView) and wid.scene() == event[1]:\n index = i\n if index <= -1:\n return\n if(event[0] == \"graphoperator_graphsaved\"):\n self.reinit_treeview()\n caption = \"Workspace %i - %s\" % (index, event[2].name)\n self.tabWorkspace.setTabText(index, caption)\n elif(event[0] == \"graphoperator_graphclosed\"):\n self.close_tab_workspace(index)\n elif(event[0] == \"graphoperator_graphreloaded\"):\n self.session.workspaces[index] = event[2]\n\n if(type(sender) == type(self.session)):\n if(event and event[0] == \"workspace_added\"):\n graph = event[1]\n self.open_widget_tab(graph, graph.factory)\n else:\n self.update_tabwidget()\n self.reinit_treeview()\n\n def closeEvent(self, event):\n \"\"\" Close All subwindows \"\"\"\n\n #Save personnal settings\n self.write_settings()\n\n #close windows\n for i in range(self.tabWorkspace.count()):\n w = self.tabWorkspace.widget(i)\n w.close()\n\n event.accept()\n\n def reinit_treeview(self):\n \"\"\" Reinitialise package and category views \"\"\"\n self.cat_model.reset()\n self.pkg_model.reset()\n self.datapool_model.reset()\n self.search_model.reset()\n\n def close_tab_workspace(self, cindex):\n \"\"\" Close workspace indexed by cindex cindex is Node\"\"\"\n w = self.tabWorkspace.widget(cindex)\n self.tabWorkspace.removeTab(cindex)\n self.session.close_workspace(cindex, False)\n g = w.scene().get_graph()\n g.close()\n #finally we close the dataflowview.\n w.close()\n del w\n\n def current_view(self):\n \"\"\" Return the active widget \"\"\"\n return self.tabWorkspace.currentWidget()\n\n def update_tabwidget(self):\n \"\"\" open tab widget \"\"\"\n # open tab widgets\n for (i, node) in enumerate(self.session.workspaces):\n\n if(i < self.tabWorkspace.count()):\n widget = self.tabWorkspace.widget(i)\n if(node != widget.scene().get_graph()):\n self.close_tab_workspace(i)\n self.open_widget_tab(node, factory=node.factory, pos=i)\n\n # close last tabs\n removelist = list(range(len(self.session.workspaces), self.tabWorkspace.count()))\n removelist.reverse()\n for i in removelist:\n self.close_tab_workspace(i)\n\n def open_widget_tab(self, graph, factory, caption=None, pos=-1):\n \"\"\"\n Open a widget in a tab giving an instance and its widget\n caption is append to the tab title\n \"\"\"\n gwidget = None\n try:\n # Since dataflowview.GraphicalGraph.__adapterType__ is dataflowview.adapter.GraphAdapter\n # graph will automatically be wrapped by that class and gwidget will exclusevily\n # talk to the adapter instead of the original graph. 
This thing is twisted but works well.\n gwidget = dataflowview.GraphicalGraph.create_view(graph, parent=self)\n gwidget.set_clipboard(self.session.clipboard)\n gwidget.set_siblings(self.session.workspaces)\n gwidget.scene().focusedItemChanged.connect(self.on_scene_focus_change)\n self.session.add_graph_view(gwidget)\n except Exception as e:\n # fix_print_with_import\n print((\"open_widget_tab\", e))\n traceback.print_exc()\n return\n\n if(not caption):\n i = self.session.workspaces.index(graph)\n caption = \"Workspace %i - %s\" % (i, graph.get_caption())\n\n index = self.tabWorkspace.insertTab(pos, gwidget, caption)\n self.tabWorkspace.setCurrentIndex(index)\n #there is a bug in QGraphicsScene+QTabWidget that makes\n #secondary tabs inactive, so we force them to be active\n #by sending new views the QEvent.WindowActivate event.\n #The bug is present until Qt4.6.2 at least. Bugreport:\n #http://bugreports.qt.nokia.com/browse/QTBUG-11148\n QtCore.QCoreApplication.instance().notify(gwidget, QtCore.QEvent(QtCore.QEvent.WindowActivate))\n if gwidget is not None:\n gwidget.show_entire_scene()\n return index\n\n def add_pkgdir(self):\n dirname = QtWidgets.QFileDialog.getExistingDirectory(self, \"Select Package/Directory\")\n if(dirname):\n self.pkgmanager.load_directory(str(dirname))\n self.reinit_treeview()\n\n def reload_all(self):\n\n # Reload package manager\n self.pkgmanager.reload()\n self.reinit_treeview()\n\n # Reload workspace\n print(\"WARNING TODO RELOAD EACH TAB\")\n #for index in range(len(self.index_nodewidget)):\n # self.reload_from_factory(index)\n\n def ws_changed(self, index):\n \"\"\" Current workspace has changed \"\"\"\n self.session.cworkspace = index\n\n def contextMenuEvent(self, event):\n \"\"\" Context menu event : Display the menu\"\"\"\n\n pos = self.tabWorkspace.mapFromGlobal(event.globalPos())\n\n tabBar = self.tabWorkspace.tabBar()\n count = tabBar.count()\n\n index = -1\n for i in range(count):\n if(tabBar.tabRect(i).contains(pos)):\n index = i\n break\n\n # if no bar was hit, return\n if (index < 0):\n return\n\n # set hit bar to front\n self.tabWorkspace.setCurrentIndex(index)\n\n def close_current_ws():\n self.close_tab_workspace(index)\n\n menu = QtWidgets.QMenu(self)\n\n action = menu.addAction(\"Close\")\n action.triggered.connect(lambda :self.close_tab_workspace(index))\n #action.triggered.connect(close_current_ws)\n\n# action = menu.addAction(\"Run\")\n# self.connect(action, QtCore.pyqtSignal(\"triggered()\"), self.run)\n\n# action = menu.addAction(\"Export to Model\")\n# self.connect(action, QtCore.pyqtSignal(\"triggered()\"), self.export_to_factory)\n\n menu.move(event.globalPos())\n menu.show()\n\n def new_workspace(self):\n \"\"\" Create an empty workspace \"\"\"\n self.session.add_workspace()\n\n def new_graph(self):\n \"\"\" Create a new graph \"\"\"\n\n dialog = NewGraph(\"New Composite Node\", self.pkgmanager, self)\n ret = dialog.exec_()\n\n if(ret > 0):\n newfactory = dialog.create_cnfactory(self.pkgmanager)\n self.reinit_treeview()\n self.open_compositenode(newfactory)\n\n def new_python_node(self):\n \"\"\" Create a new node \"\"\"\n\n dialog = NewGraph(\"New Python Node\", self.pkgmanager, self)\n ret = dialog.exec_()\n\n if(ret > 0):\n dialog.create_nodefactory(self.pkgmanager)\n self.reinit_treeview()\n\n def new_data(self):\n \"\"\" Import file \"\"\"\n\n dialog = NewData(\"Import data file\", self.pkgmanager, self)\n ret = dialog.exec_()\n\n if(ret > 0):\n dialog.create_datafactory(self.pkgmanager)\n self.reinit_treeview()\n\n def 
new_package(self):\n \"\"\" Create a new user package \"\"\"\n\n dialog = NewPackage(list(self.pkgmanager.keys()), parent=self)\n ret = dialog.exec_()\n\n if(ret > 0):\n (name, metainfo, path) = dialog.get_data()\n\n self.pkgmanager.create_user_package(name, metainfo, path)\n self.reinit_treeview()\n\n def exec_python_script(self):\n \"\"\" Choose a python source and execute it \"\"\"\n\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(\n self, \"Python Script\", filter=\"Python script (*.py)\")\n\n filename = str(filename)\n if(not filename):\n return\n\n # Try if IPython\n try:\n file = open(filename, 'r')\n src = \"\"\n for f in file:\n src += f\n self.interpreterWidget.get_interpreter().runcode(src, hidden=False)\n file.close()\n\n except:\n file = open(filename, 'r')\n sources = ''\n compiled = None\n import code\n for line in file:\n sources += line\n compiled = code.compile_command(sources, filename)\n if(compiled):\n self.interpreterWidget.get_interpreter().runcode(compiled)\n sources = ''\n file.close()\n\n sources = ''\n\n def open_python_console(self):\n \"\"\" Set focus on the python shell \"\"\"\n try:\n self.interpreterWidget.setFocus(QtCore.Qt.ShortcutFocusReason)\n except:\n pass\n\n def clear_python_console(self):\n \"\"\" Clear python shell \"\"\"\n self.interpreterWidget.clear()\n\n def new_session(self):\n self.session.clear()\n\n def open_session(self):\n\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(\n self, \"OpenAlea Session\", QtCore.QDir.homePath(), \"Session file (*.oas)\")\n\n filename = str(filename)\n if(not filename):\n return\n\n self.session.load(filename)\n\n def save_session(self):\n \"\"\" Save menu entry \"\"\"\n\n if(not self.session.session_filename):\n self.save_as()\n else:\n self.session.save(self.session.session_filename)\n\n def save_as(self):\n \"\"\" Save as menu entry \"\"\"\n\n filename, _ = QtWidgets.QFileDialog.getSaveFileName(\n self, \"OpenAlea Session\", QtCore.QDir.homePath(), \"Session file (*.oas)\")\n\n filename = str(filename)\n if(not filename):\n return\n\n self.session.save(filename)\n\n def clear_data_pool(self):\n \"\"\" Clear the data pool \"\"\"\n\n self.session.datapool.clear()\n\n def search_node(self):\n \"\"\" Activated when search line edit is validated \"\"\"\n\n # text = str(str(self.search_lineEdit.text()).encode('latin1')) # don't get why this encode()\n text = self.search_lineEdit.text()\n results = self.pkgmanager.search_node(text)\n self.search_model.set_results(results)\n\n def find_node(self):\n \"\"\" Find node Command \"\"\"\n\n i = self.tabPackager.indexOf(self.searchview)\n self.tabPackager.setCurrentIndex(i)\n self.search_lineEdit.setFocus()\n\n def open_preferences(self):\n \"\"\" Open Preference dialog \"\"\"\n dialog = PreferencesDialog(self)\n dialog.show()\n ret = dialog.exec_()\n\n # ! 
does not return anything and does not use ret ?\n\n    # Drag and drop support\n    def dragEnterEvent(self, event):\n        \"\"\"todo\"\"\"\n        if event.mimeData().hasUrls():\n            event.accept()\n        else:\n            event.ignore()\n\n    def dropEvent(self, event):\n        \"\"\"todo\"\"\"\n        urls = event.mimeData().urls()\n        try:\n            file = urls[0]\n            filename = str(file.path())\n            self.session.load(filename)\n            event.accept()\n\n        except Exception as e:\n            print(e)\n            event.ignore()\n\n    ############################\n    # Handling the Help widget #\n    ############################\n    def on_scene_focus_change(self, scene, item):\n        assert isinstance(item, dataflowview.vertex.GraphicalVertex)\n        self.helpWidget.set_rst(item.vertex().get_tip())\n\n    def on_package_manager_focus_change(self, item):\n        pkg_id, factory_id, mimetype = NodeFactoryView.get_item_info(item)\n        if len(pkg_id) and len(factory_id) and mimetype in [NodeFactory.mimetype,\n                                                            CompositeNodeFactory.mimetype]:\n            factory = self.pkgmanager[pkg_id][factory_id]\n            factoryDoc = factory.get_documentation()\n            txt = factory.get_tip(asRst=True) + \"\\n\\n\"\n            if factoryDoc is not None:\n                txt += \"**Docstring:**\\n\" + factoryDoc\n            self.helpWidget.set_rst(txt)\n\n    # Window support\n    def display_leftpanel(self, toggled):\n        self.splitter_2.setVisible(toggled)\n\n    def display_rightpanel(self, toggled):\n        self.splitter.setVisible(toggled)\n\n    def to_python_script(self):\n        \"\"\"Translate the active workspace into a python script\"\"\"\n\n        widget = self.tabWorkspace.currentWidget()\n        if widget is None:\n            return\n\n        composite_node = widget.scene().get_graph()\n        if composite_node is not None:\n            print(\"BEGIN script\")\n            # fix_print_with_import\n            print(composite_node.to_script(), \"END script\")\n\n    def export_image(self):\n        \"\"\" Export current workspace to an image \"\"\"\n\n        filename, _ = QtWidgets.QFileDialog.getSaveFileName(\n            self, \"Export image\", QtCore.QDir.homePath(), \"PNG Image (*.png)\")\n\n        filename = str(filename)\n        if not filename:\n            return\n        elif '.' not in filename:\n            filename += '.png'\n\n        # Get current workspace\n        view = self.tabWorkspace.currentWidget()\n        # Retrieve the user layout\n        rect = view.scene().sceneRect()\n        matrix = view.transform()\n        rect = matrix.mapRect(rect)\n\n        pixmap = QtGui.QPixmap(rect.width(), rect.height())\n        pixmap.fill()\n        painter = QtGui.QPainter(pixmap)\n        painter.setRenderHint(QtGui.QPainter.Antialiasing)\n        view.update()\n        view.scene().render(painter)\n        painter.end()\n        pixmap.save(filename)\n\n    def export_image_svg(self):\n        \"\"\" Export current workspace to an SVG image \"\"\"\n\n        filename, _ = QtWidgets.QFileDialog.getSaveFileName(\n            self, \"Export svg image\", QtCore.QDir.homePath(), \"SVG Image (*.svg)\")\n\n        filename = str(filename) # useless ?\n        if not filename:\n            return\n        elif '.' 
not in filename:\n            filename += '.svg'\n\n        # Get current workspace\n        view = self.tabWorkspace.currentWidget()\n\n        # Retrieve the user layout\n        rect = view.scene().sceneRect()\n        matrix = view.transform()\n        rect = matrix.mapRect(rect)\n\n        svg_gen = QtSvg.QSvgGenerator()\n        svg_gen.setFileName(filename)\n        svg_gen.setSize(rect.toRect().size())\n\n        painter = QtGui.QPainter(svg_gen)\n        painter.setRenderHint(QtGui.QPainter.Antialiasing)\n        view.scene().render(painter)\n        painter.end()\n","repo_name":"openalea/visualea","sub_path":"src/openalea/visualea/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":35371,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"3630682727","text":"#!/usr/bin/python\n\nimport re,os\n\nsymptom_name_mapping = {'low self-esteem': 'low-self-esteem', 'obsessive behavior': 'obsessive-behavior', 'social inhibition': 'social-inhibition', 'withdrawal sickness': 'withdrawal-sickness', 'racing thoughts': 'racing-thoughts', 'compulsive behavior': 'compulsive-behavior', 'academic failure': 'academic-failure', 'chronic pain': 'chronic-pain', 'suicidal ideation': 'suicidal-ideation', 'problems concentrating': 'problems-concentrating', 'detached behavior': 'detached-behavior', 'suicidal behavior': 'suicidal-behavior', 'back pain': 'back-pain', 'loss of appetite': 'loss-of-appetite', 'general pain': 'general-pain', 'sexual dysfunction': 'sexual-dysfunction', 'acting out': 'acting-out', 'disorganized thoughts': 'disorganized-thoughts', 'severe sensitivity': 'severe-sensitivity', 'danger to others': 'danger-to-others', 'danger to self': 'danger-to-self', 'phantom pain': 'phantom-pain', 'family issues':'family-issues', 'family history':'family-history'}\n\ndef uccfilereader(filename):\n\n\tfile = open(filename,'r').readlines()\n\tfile_data = {}\n\tfile_lines = []\n\tfor line in file[1:]:\n\n\t\ttry:\n\t\t\trole, data = line.lstrip().strip().split(\":\")\n\t\t\ttext,tags = preprocess(data)\n\n\t\t\t#normalize codes\n\t\t\tfor i in range(0,len(tags)):\n\t\t\t\tif tags[i] in file_data.keys():\n\t\t\t\t\tfile_data[tags[i]].append(text)\n\t\t\t\telse:\n\t\t\t\t\tfile_data[tags[i]] = []\n\t\t\t\t\tfile_data[tags[i]].append(text)\t\n\t\t\t\t\tfile_lines.append(text)\n\n#            tags = list(set(tags))\n#            file_data.append([text,role,tags])\n\t\texcept:\n\t\t\tcontinue\n#\t\tprint(file_dat)\n\tfor key in file_data.keys():\n\t\t\toutput_file = open(\"data/\"+os.path.basename(filename)+\"_\"+key+\".txt\",'w')\n\t\t\tfor line in file_data[key]:\n\t\t\t\toutput_file.write(line+\" .\\n\")\n\t\t\toutput_file.close()\n\n\n#    output_file\n\treturn \" .\".join(file_lines)\n\ndef preprocess(text):\n\n    tags = re.findall('\\{.*?\\}',text)\n\n    #remove all tags\n    for tag in tags:\n        text = text.replace(tag,'')\n    for i in range(0,len(tags)):\n        tags[i] = tags[i].replace(\"{\",\"\")\n        tags[i] = tags[i].replace(\"}\",\"\")\n\n\n    for key in symptom_name_mapping.keys():\n    \ttext = text.replace(key, symptom_name_mapping[key])\n# \tprint(\"~~~searching\")\n    return text.lstrip(),tags\n","repo_name":"ksingla025/Topic_based_template_summarization","sub_path":"summarize_codes.py","file_name":"summarize_codes.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"22315229046","text":"# \nimport dill\nimport os\nimport json\nimport pandas as pd\n\npath = os.environ.get('PROJECT_PATH', '../..')\n\n\ndef predict():\n    def newest(file_dir):\n        files = 
os.listdir(file_dir)\n paths = [os.path.join(file_dir, basename) for basename in files]\n return max(paths, key=os.path.getctime)\n\n pkl_path = newest(f'{path}/data/models')\n with open(pkl_path, 'rb') as file:\n model = dill.load(file)\n\n df_predict = pd.DataFrame(columns=['id', 'predict'])\n\n for filename in os.listdir(f'{path}/data/test'):\n with open(os.path.join(f'{path}/data/test', filename), 'r') as file:\n js_file = json.load(file)\n df = pd.DataFrame.from_dict([js_file])\n\n y = model.predict(df)\n pred = {'id': df.id.values[0], 'predict': y[0]}\n df_predict = df_predict.append(pred, ignore_index=True)\n df_predict.to_csv(f'{path}/data/predictions/predict.csv', index=False)\n\n\nif __name__ == '__main__':\n predict()\n","repo_name":"TomatooJuice/HW33","sub_path":"dags/modules/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36616291244","text":"from django.http import HttpResponse\nfrom reportlab.pdfgen import canvas\nimport os\nimport markdown\nfrom tkinter import filedialog\nfrom tkinter import *\n\ndef convertFileMdToHtml(request):\n root=Tk()\n root.withdraw()\n root.lift()\n root.attributes('-topmost',True)\n filename = filedialog.askopenfilename(parent=root, initialdir = \"/\",title = \"Select file\",filetypes = ((\"Markdown file\" ,\"*.md\"),(\"all files\",\"*.*\")))\n root.destroy()\n #si l'utilisateur ne sélectionne pas de fichier\n if filename == \"\":\n print(\"Aucun fichier sélectionné\")\n alert =\"('Aucun fichier sélectionné');\"\n elif filename[-2:] == \"md\":\n #Récupère la taille du fichier\n sizeFile= os.path.getsize(filename)\n if sizeFile > 10000000:\n print(\"Fichier trop volumineux\")\n alert =\"('Fichier trop volumineux');\"\n else:\n with open(filename, \"r\") as f:\n text=f.read()\n html=markdown.markdown(text)\n with open(os.path.expanduser(\"~/Downloads/mdConvert.html\"), \"w\") as f:\n f.write(html) \n print(\"Fichier converti avec succès !\")\n alert =\"('Fichier converti');\"\n else:\n print(\"Fichier non pris en charge\")\n alert =\"('Fichier non pris en charge');\" \n return HttpResponse(\"\"\" \"\"\") \n","repo_name":"Abusbuse/-PROJET--Creation-du-site-de-conversion-de-fichiers","sub_path":"src/monProjet/home/mdToHtml.py","file_name":"mdToHtml.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6159661844","text":"# Program to compare multiple rtf files across different folders\r\n# This will output the differences in HTML layout\r\n\r\nimport os\r\nimport difflib\r\nimport re\r\nfrom difflib import HtmlDiff\r\nfrom pathlib import Path\r\n\r\ntxt_1 = r\"C:\\Users\\vidya\\Python\\rtf_comparator\\folder1\"\r\ntxt_2 = r\"C:\\Users\\vidya\\Python\\rtf_comparator\\folder2\"\r\npath=r\"C:\\Users\\vidya\\Python\\rtf_comparator\\folder1\"\r\n\r\nfol_1 = []\r\nfol_2 = []\r\nfor fname in os.listdir(path=txt_1):\r\n fol_1.append(fname)\r\nfor fname in os.listdir(path=txt_2):\r\n fol_2.append(fname)\r\nfor i in fol_1:\r\n for j in fol_2:\r\n print(\"First file is: \", i)\r\n print(\"Second file is: \", j)\r\n print(\"First folder location is: \", txt_1)\r\n file1lines = open(os.path.join(txt_1, i)).readlines()\r\n file2lines = open(os.path.join(txt_2, j)).readlines()\r\n #i = i.rstrip('\\r\\n')\r\n #j = j.rstrip('\\r\\n')\r\n i = re.sub(r'\\r\\n\\s+',' ', i).strip()\r\n j = re.sub(r'\\r\\n\\s+',' ', j).strip()\r\n diff 
= difflib.HtmlDiff().make_file(file1lines, file2lines, i, j,context=False)\r\n    path = r\"C:\\Users\\vidya\\Python\\rtf_comparator\\folder1\\\\\" + os.path.basename(i)+ '_compare.html'\r\n    print(\"Output file is: \", path)\r\n    #f=open(r\"C:\\Users\\vidya\\Python\\rtf_comparator\\folder1\\\" + str(i) + str(\"compare.html\"),'w')\r\n    f=open(path,'w')\r\n    f.write(diff)\r\n    f.close()\r\n\r\n\r\n    #sys.stdout.writelines(diff)\r\n\r\n","repo_name":"gvasist/comparertfs","sub_path":"rtf_comparator_withdifferences_in_html.py","file_name":"rtf_comparator_withdifferences_in_html.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38956392094","text":"def non_neg_int(n):\n    result = int(n)\n    if result < 0:\n        raise ValueError(result)\n    return result\n\n\nwhile True:\n    x = non_neg_int(input('Please enter a nonnegative integer:'))\n    if x == 999: # Secret number exits loop\n        break\n    print('You entered', x)\n","repo_name":"halterman/PythonBook-SourceCode","sub_path":"Chap12/nonnegconvert.py","file_name":"nonnegconvert.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"21"}
{"seq_id":"71895219894","text":"import re\nimport logging\n\n# NOTE: assumed import -- the original file used ValidationError without\n# importing it; Django's exception is the most plausible source in this\n# Django homework project.\nfrom django.core.exceptions import ValidationError\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserValidationMixin:\n    # NOTE: hypothetical wrapper class -- the original defined my_validator\n    # and clean at module level while still taking 'self', so they appear to\n    # have been lifted from a model or form class.\n    first_name_valid = re.compile(r'^[A-z]+\\ ?[A-z]?$')\n    phone_valid = re.compile(r'\\+380\\d{9}')\n    last_name_valid = re.compile(r'^[A-z]+\\-?[A-z]?$')\n\n    def my_validator(self, val, value_valid):\n        is_match = value_valid.search(val)\n        return is_match\n\n    def clean(self):\n        if not self.my_validator(self.phone_number, self.phone_valid):\n            logger.warning('bad user, wrong data number')\n            raise ValidationError('not correct number')\n        if not self.my_validator(self.first_name, self.first_name_valid):\n            logger.warning('bad user, wrong data first name')\n            raise ValidationError('not correct first name')\n        if not self.my_validator(self.last_name, self.last_name_valid):\n            logger.warning('bad user, wrong data last name')\n            raise ValidationError('not correct last_name')\n","repo_name":"VladLungul/university_django_itea_homework","sub_path":"university/university/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18379834191","text":"\nimport sys\nimport shutil\nfrom tempfile import gettempdir\nfrom os.path import join\n\nfrom wazuh_testing.qa_ctl.provisioning.ansible.ansible_output import AnsibleOutput\nfrom wazuh_testing.qa_ctl.provisioning.ansible.ansible_playbook import AnsiblePlaybook\nfrom wazuh_testing.qa_ctl import QACTL_LOGGER\nfrom wazuh_testing.tools.logging import Logging\nfrom wazuh_testing.tools.exceptions import AnsibleException\n\nif sys.platform != 'win32':\n    import ansible_runner\n\n\nclass AnsibleRunner:\n    \"\"\"Allow running ansible playbooks on the indicated hosts.\n\n    Args:\n        ansible_inventory_path (string): Path where the ansible inventory file is located.\n        ansible_playbook_path (string): Path where the playbook file is located.\n        private_data_dir (string): Path where the artifact files (result files) will be stored.\n        output (boolean): True for showing ansible task output in stdout, False otherwise.\n        task_id (str): Runner task id. 
It allows the task to be identified.\n\n    Attributes:\n        ansible_inventory_path (string): Path where the ansible inventory file is located.\n        ansible_playbook_path (string): Path where the playbook file is located.\n        private_data_dir (string): Path where the artifact files (result files) will be stored.\n        output (boolean): True for showing ansible task output in stdout, False otherwise.\n        task_id (str): Runner task id. It allows the task to be identified.\n    \"\"\"\n    LOGGER = Logging.get_logger(QACTL_LOGGER)\n\n    def __init__(self, ansible_inventory_path, ansible_playbook_path,\n                 private_data_dir=join(gettempdir(), 'wazuh_qa_ctl'), output=False, task_id=None):\n        self.ansible_inventory_path = ansible_inventory_path\n        self.ansible_playbook_path = ansible_playbook_path\n        self.private_data_dir = private_data_dir\n        self.output = output\n        self.task_id = task_id\n\n    def run(self, log_ansible_error=True):\n        \"\"\"Run the ansible playbook on the indicated hosts.\n\n        Args:\n            log_ansible_error (boolean): True for logging the error exception message if any.\n\n        Returns:\n            AnsibleOutput: Result of the ansible playbook run.\n        \"\"\"\n        quiet = not self.output\n        AnsibleRunner.LOGGER.debug(f\"Running {self.ansible_playbook_path} ansible-playbook with \"\n                                   f\"{self.ansible_inventory_path} inventory\")\n\n        runner = ansible_runner.run(private_data_dir=self.private_data_dir, playbook=self.ansible_playbook_path,\n                                    inventory=self.ansible_inventory_path, quiet=quiet,\n                                    envvars={'ANSIBLE_GATHER_TIMEOUT': 30, 'ANSIBLE_TIMEOUT': 20})\n        ansible_output = AnsibleOutput(runner)\n\n        if ansible_output.rc != 0:\n            raise AnsibleException(f'Failed: {ansible_output}', AnsibleRunner.LOGGER.error, QACTL_LOGGER) if \\\n                log_ansible_error else AnsibleException(f'Failed: {ansible_output}')\n\n        return ansible_output\n\n    @staticmethod\n    def run_ephemeral_tasks(ansible_inventory_path, playbook_parameters, raise_on_error=True, output=False,\n                            log_ansible_error=True):\n        \"\"\"Run the ansible tasks given by the playbook parameters.\n\n        Args:\n            ansible_inventory_path (string): Path where the ansible directory is placed.\n            playbook_parameters : Parameters for the ansible playbook.\n            raise_on_error (boolean): Set if errors or unexpected behaviour are going to raise errors. Set to 'True'\n                by default.\n            output (boolean): Set if there are going to be outputs. 
Set to 'False' by default.\n log_ansible_error (boolean): True for logging the error exception message if any.\n\n Returns:\n AnsibleOutput: Result of the ansible playbook run.\n\n \"\"\"\n ansible_playbook = AnsiblePlaybook(**playbook_parameters)\n quiet = not output\n\n try:\n AnsibleRunner.LOGGER.debug(f\"Running {ansible_playbook.playbook_file_path} ansible-playbook with \"\n f\"{ansible_inventory_path} inventory\")\n runner = ansible_runner.run(playbook=ansible_playbook.playbook_file_path, inventory=ansible_inventory_path,\n quiet=quiet, envvars={'ANSIBLE_GATHER_TIMEOUT': 30, 'ANSIBLE_TIMEOUT': 20})\n ansible_output = AnsibleOutput(runner)\n\n if ansible_output.rc != 0 and raise_on_error:\n raise AnsibleException(f'Failed: {ansible_output}', AnsibleRunner.LOGGER.error, QACTL_LOGGER) if \\\n log_ansible_error else AnsibleException(f'Failed: {ansible_output}')\n\n return ansible_output\n\n finally:\n ansible_playbook.delete_playbook_file()\n shutil.rmtree(runner.config.private_data_dir)\n","repo_name":"wazuh/wazuh-qa","sub_path":"deps/wazuh_testing/wazuh_testing/qa_ctl/provisioning/ansible/ansible_runner.py","file_name":"ansible_runner.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"2718067774","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom model1 import *\nfrom cnews import *\nfrom sklearn import metrics\nimport numpy as np\nimport time\nimport os, re\nfrom datetime import timedelta\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--base_dir\", type=str, default=\"data/\")\nparser.add_argument(\"--test_dir\", type=str, default=\"data/dev.tsv\")\nparser.add_argument(\"--test_result_dir\", type=str, default=\"data/dev_result\")\nparser.add_argument(\"--embedding_dim\", type=int, default=64)\nparser.add_argument(\"--seq_length\", type=int, default=128)\nparser.add_argument(\"--num_filters\", type=int, default=1)\nparser.add_argument(\"--filter_sizes\", type=str, default=\"3\")\nparser.add_argument(\"--vocab_size\", type=int, default=5000)\nparser.add_argument(\"--hidden_dim\", type=int, default=768)\nparser.add_argument(\"--learning_rate\", type=float, default=0.001)\nparser.add_argument(\"--batch_size\", type=int, default=64)\n\nFLAGS = parser.parse_args()\n\nbase_dir = FLAGS.base_dir\ntest_dir = FLAGS.test_dir\nvocab_dir = os.path.join(base_dir, \"vocab.txt\")\nlabel_dir = os.path.join(base_dir, \"label.txt\")\n\nsave_path = os.path.join(base_dir, \"textcnn/model.ckpt\")\nembedding_dim = FLAGS.embedding_dim\nseq_length = FLAGS.seq_length\nnum_filters = FLAGS.num_filters\nfilter_sizes = list(map(int, FLAGS.filter_sizes.split(\",\")))\nvocab_size = FLAGS.vocab_size\nhidden_dim = FLAGS.hidden_dim\nlearning_rate = FLAGS.learning_rate\nbatch_size = FLAGS.batch_size\n\ntest_dir = FLAGS.test_dir\ntest_result_dir = FLAGS.test_result_dir\n\ndef get_time_dif(start_time):\n \"\"\"获取已使用时间\"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n\ndef feed_data(x_batch, y_batch, keep_prob):\n feed_dict = {\n model.input_x: x_batch,\n model.input_y: y_batch,\n model.keep_prob: keep_prob\n }\n return feed_dict\n\ndef read_input(file):\n contents, labels = [], []\n with open(file, 'r') as f:\n for line in f:\n try:\n line = line.strip().split('\\t')\n if len(line) == 3: # char+word+label\n char = line[0].strip()\n label = line[-1].strip()\n if len(line) == 2: # char+label\n char = 
line[0].strip()\n label = line[-1].strip()\n contents.append(char)\n labels.append(label)\n except:\n pass\n return contents, labels\n\ndef evaluate(sess, x_, y_):\n \"\"\"评估在某一数据上的准确率和损失\"\"\"\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, batch_size)\n total_loss = 0.0\n total_acc = 0.0\n total_rec = 0.0\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n feed_dict = feed_data(x_batch, y_batch, 1.0)\n loss, acc, rec = sess.run([model.loss, model.acc, model.rec], feed_dict=feed_dict)\n total_loss += loss * batch_len\n total_acc += acc * batch_len\n total_rec += rec * batch_len\n\n return total_loss / data_len, total_acc / data_len, total_rec / data_len\n\ndef tes():\n print(\"Loading test data...\")\n inputs, labels = read_input(test_dir)\n start_time = time.time()\n x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, seq_length, num_classes)\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess=session, save_path=save_path) # 读取保存的模型\n\n print('Testing...')\n loss_test, acc_test, rec_test = evaluate(session, x_test, y_test)\n msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'\n print(msg.format(loss_test, acc_test))\n\n data_len = len(x_test)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n y_test_cls = np.argmax(y_test, 1)\n y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32) # 保存预测结果\n y_pred_soft = [0]*len(x_test)\n for i in range(num_batch): # 逐批次处理\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n feed_dict = {\n model.input_x: x_test[start_id:end_id],\n model.keep_prob: 1.0\n }\n y_pred_cls[start_id:end_id], soft = session.run([model.y_pred_cls, model.soft], feed_dict=feed_dict)\n for j in range(end_id-start_id):\n y_pred_soft[j+start_id] = soft[j]\n f = open(test_result_dir, 'w')\n\n print (len(y_pred_cls), len(inputs), len(labels))\n for m in range(len(y_pred_cls)):\n f.write(inputs[m] + \"\\t\" + labels[m] + \"\\t\")\n scores = []\n max_label = \"\"\n max_score = 0.0\n ind = -1\n for n in y_pred_soft[m]:\n ind += 1\n if id_to_cat[ind] == \"negative\":\n continue\n scores.append(str(n))\n if n > max_score:\n max_score = n\n max_label = id_to_cat[ind]\n f.write(max_label + \"\\t\")\n for i in scores:\n f.write(i + \" \")\n f.write(\"\\n\")\n f.close()\n\n # 评���\n print(\"Precision, Recall and F1-Score...\")\n print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories, digits=4))\n\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n\nif __name__ == '__main__':\n print('Configuring CNN model...')\n\n categories, cat_to_id, id_to_cat = read_category(label_dir)\n num_classes = len(cat_to_id)\n words, word_to_id = read_vocab(vocab_dir)\n model = TextCNN(seq_length, num_classes, vocab_size, embedding_dim, filter_sizes, num_filters, hidden_dim,\n learning_rate)\n\n tes()\n","repo_name":"cy565025164/textcnn","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70704806454","text":"import os\nimport re\nimport hmac\nimport time\nimport jinja2\nimport webapp2\nfrom models import db, post_key, users_key, comment_key, User, Post, Comment, Like\n\n\nsecret = ' q728b3tx8nisr27tmat7y3khrm1i24qhlfakelf2,i3gqh4nx'\n\n\n# Jinja initialization\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(\n 
loader=jinja2.FileSystemLoader(template_dir), autoescape=True)\n\n\ndef render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n\nclass BlogHandler(webapp2.RequestHandler):\n\n def write(self, *a, **kw):\n self.response.out.write(*a, **kw)\n\n def render_str(self, template, **params):\n params['user'] = self.user\n return render_str(template, **params)\n\n def render(self, template, **kw):\n self.write(self.render_str(template, **kw))\n\n def set_secure_cookie(self, name, val):\n cookie_val = make_secure_val(val)\n self.response.headers.add_header(\n 'Set-Cookie',\n '%s=%s; Path=/' % (name, cookie_val))\n\n def read_secure_cookie(self, name):\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)\n\n def login(self, user):\n self.set_secure_cookie('user_id', str(user.key().id()))\n\n def logout(self):\n self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')\n\n def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))\n\n\n\n# Security Layer\ndef user_owns_post(self, post):\n if post:\n return self.user.name == post.author.name\n\ndef user_logged_in(self):\n return self.user\n\ndef user_owns_comment(self, comment):\n return self.user.name == comment.author.name\n\n\n# Hashing Methods\ndef make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())\n\n\ndef check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val\n\n\n# Input verification methods\nUSER_RE = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n\n\ndef valid_username(username):\n return username and USER_RE.match(username)\n\nPASS_RE = re.compile(r\"^.{3,20}$\")\n\n\ndef valid_password(password):\n return password and PASS_RE.match(password)\n\n\n\n# Home Page (Login)\nclass MainPage(BlogHandler):\n\n def get(self):\n self.render('mainPage.html')\n\n def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n\n u = User.login(username, password)\n if u:\n self.login(u)\n self.redirect('/blog')\n else:\n msg = 'Invalid login'\n self.render('mainPage.html', error=msg)\n\n\n\n# Sign Up Page\nclass SignUpPage(BlogHandler):\n\n def get(self):\n self.render(\"signUpPage.html\")\n\n def post(self):\n have_error = False\n self.username = self.request.get('username')\n self.password = self.request.get('password')\n self.verify = self.request.get('verify')\n\n error_message = ''\n\n if not valid_username(self.username):\n error_message = \"That's not a valid username.\"\n self.render('signUpPage.html', error_message=error_message)\n elif not valid_password(self.password):\n error_message = \"That wasn't a valid password.\"\n self.render('signUpPage.html', error_message=error_message)\n elif self.password != self.verify:\n error_message = \"Your passwords didn't match.\"\n self.render('signUpPage.html', error_message=error_message)\n else:\n self.done()\n\n def done(self, *a, **kw):\n raise NotImplementedError\n\n\nclass Register(SignUpPage):\n def done(self):\n # make sure the user doesn't already exist\n u = User.by_name(self.username)\n if u:\n msg = 'That user already exists.'\n self.render('signUpPage.html', error_message=msg)\n else:\n u = User.register(self.username, self.password)\n u.put()\n self.login(u)\n self.redirect('/blog')\n\n\n\n\n# Logout\nclass Logout(BlogHandler):\n\n def get(self):\n self.logout()\n 
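        # logout() blanks the signed 'user_id' cookie; the redirect then lands
        # on the login page (MainPage) as an anonymous visitor.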
self.redirect('/')\n\n\n\n\n# Blog Page\nclass BlogFront(BlogHandler):\n\n def get(self):\n # checks if user is logged in before showing blog entries\n if user_logged_in(self):\n posts = Post.all().order('-created')\n params = dict(user=self.user, posts=posts)\n time.sleep(0.1)\n self.render('blogPage.html', **params)\n else:\n self.redirect('/')\n\n\n\n\n# New Post Creation\nclass NewPost(BlogHandler):\n\n def get(self):\n # checks if user is logged in\n if not user_logged_in(self):\n self.redirect('/')\n else:\n self.render(\"newPostPage.html\", user=self.user)\n\n def post(self):\n if not user_logged_in(self):\n self.redirect('/')\n\n title = self.request.get('title')\n content = self.request.get('content')\n\n if title and content:\n p = Post(parent=post_key(), title=title,\n content=content, author=self.user)\n p.put()\n self.redirect('/blog')\n else:\n error = \"Both subject and content are required!\"\n self.render(\"newPostPage.html\", title=title,\n content=content, error=error)\n\n\n\n\n# Edit Post Page\nclass EditPost(BlogHandler):\n\n def get(self, post_id):\n if not user_logged_in(self):\n return self.redirect('/')\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n if user_owns_post(self, post):\n params = dict(title=post.title, content=post.content,\n user=self.user)\n self.render('editPostPage.html', **params)\n else:\n return self.redirect('/')\n\n def post(self, post_id):\n # checks if user is logged in\n if not user_logged_in(self):\n return self.redirect('/')\n\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n\n # checks if the user owns the post\n if not user_owns_post(self, post):\n return self.redirect('/')\n\n title = self.request.get('title')\n content = self.request.get('content')\n\n if title and content:\n post.title = title\n post.content = content\n post.put()\n self.redirect('/blog')\n else:\n error = \"Both subject and content are required!\"\n self.render(\"newPostPage.html\", title=title,\n content=content, error=error)\n\n\n\n\n# Delete Post\nclass DeletePost(BlogHandler):\n\n def get(self, post_id):\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n\n if user_owns_post(self, post):\n post.delete()\n self.redirect('/blog')\n else:\n self.redirect('/')\n\n\n# Like/Unlike Post\nclass RatePost(BlogHandler):\n def get(self, post_id):\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n if not user_owns_post(self, post):\n self.redirect('/')\n if post.liked(self.user):\n l = db.GqlQuery(\"SELECT * FROM Like WHERE author=:user AND parent_post=:post\", user=self.user, post=post).get()\n l.delete()\n self.redirect('/blog')\n else:\n l = Like(author=self.user, parent_post=post)\n l.put()\n self.redirect('/blog')\n else:\n return self.redirect('/')\n\n\n\n\n# Posts comments\nclass CommentPost(BlogHandler):\n\n def get(self, post_id):\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n\n comments = db.GqlQuery(\"SELECT * FROM Comment WHERE parent_post = :post ORDER BY created DESC\", post=post)\n self.render('postComments.html', post=post, comments = comments, user = self.user)\n else:\n return 
self.redirect('/')\n\n def post(self, post_id):\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n # Only posts non-owners can comment\n if not user_owns_post(self, post):\n content = self.request.get('content')\n comment = Comment(parent=comment_key(), content=content, author = self.user, parent_post = post)\n comment.put()\n time.sleep(0.1)\n self.redirect('/blog/%s/comment' % str(post_id))\n else:\n return self.redirect('/blog')\n\n\n\n\n# Edit Comments\nclass EditComment(BlogHandler):\n\n def get(self, post_id, comment_id):\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n key = db.Key.from_path('Comment', int(comment_id), parent=comment_key())\n comment = db.get(key)\n if not comment:\n return self.error(404)\n # checks if the user owns the comment\n if user_owns_comment(self, comment):\n params = dict( content=post.content,\n user=self.user)\n self.render('editComment.html', **params)\n else:\n return self.redirect('/')\n\n def post(self, post_id, comment_id):\n # checks if user is logged in\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n key = db.Key.from_path('Comment', int(comment_id), parent=comment_key())\n comment = db.get(key)\n if not comment:\n return self.error(404)\n # checks if the user owns the comment\n if user_owns_comment(self, comment):\n content = self.request.get('content')\n if content:\n comment.content = content\n comment.put()\n time.sleep(0.1)\n self.redirect('/blog/%s/comment' % str(post_id))\n else:\n error = \"Some content is required!\"\n self.render(\"editComment.html\",\n content=content, error=error)\n else:\n return self.redirect('/')\n\n\n\n# Delete Comments\nclass DeleteComment(BlogHandler):\n\n def get(self, post_id, comment_id):\n if user_logged_in(self):\n key = db.Key.from_path('Post', int(post_id), parent=post_key())\n post = db.get(key)\n if not post:\n return self.error(404)\n key = db.Key.from_path('Comment', int(comment_id), parent=comment_key())\n comment = db.get(key)\n if not comment:\n return self.error(404)\n if user_owns_comment(self, comment):\n comment.delete()\n time.sleep(0.1)\n self.redirect('/blog/%s/comment' % str(post_id))\n else:\n self.redirect('/')\n\n\n\napp = webapp2.WSGIApplication([('/', MainPage),\n ('/signup', Register),\n ('/logout', Logout),\n ('/blog', BlogFront),\n ('/blog/newpost', NewPost),\n ('/blog/([0-9]+)/edit', EditPost),\n ('/blog/([0-9]+)/remove', DeletePost),\n ('/blog/([0-9]+)/comment', CommentPost),\n ('/blog/([0-9]+)/rate', RatePost),\n ('/blog/([0-9]+)/comment/([0-9]+)/edit', EditComment),\n ('/blog/([0-9]+)/comment/([0-9]+)/delete', DeleteComment)\n ],\n debug=True)\n\n\n","repo_name":"albe-rosado/multiblog","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15012088779","text":"\r\nimport time\r\n\r\nruns = 10000\r\n\r\n# Smallest multiple\r\n\r\ndef main():\r\n value = 2520\r\n aux_end = 0\r\n max_range = 20\r\n\r\n while True:\r\n if value % max_range == 0:\r\n for divisor in range(3, max_range):\r\n if value % divisor != 0:\r\n value += 10\r\n break;\r\n elif divisor + 1 == max_range:\r\n aux_end = 1\r\n else:\r\n value += 1\r\n\r\n if aux_end 
== 1:\r\n break\r\n\r\n return value\r\n\r\nif __name__ == \"__main__\":\r\n answer = 0\r\n total_time = 0\r\n\r\n for run in range(0, runs):\r\n start_time = time.time()\r\n answer = main()\r\n run_time = time.time() - start_time\r\n total_time += run_time\r\n\r\n print(\"answer --- {} ---\".format(answer))\r\n print(\"runtime --- {:.4f} ms ---\".format((total_time * 1000) / runs))\r\n\r\n","repo_name":"lucasviniciusrodrigues/euler","sub_path":"Lucas/euler/problems/Problem005.py","file_name":"Problem005.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29328276755","text":"from socket import *\r\nimport random\r\n\r\nserverName = '192.168.8.100'\r\nserverPort = 12000\r\nclientPort=random.randint(10000,20000)\r\ndef send(message):\r\n clientSocket = socket(AF_INET,SOCK_DGRAM)\r\n clientSocket.bind(('',clientPort))\r\n clientSocket.sendto(message.encode(),(serverName,serverPort))\r\n modifiedMessage,serverAddress=clientSocket.recvfrom(2048)\r\n #clientSocket.close()\r\n print(modifiedMessage.decode())\r\n\r\ndef first_connect(id):\r\n clientSocket = socket(AF_INET,SOCK_DGRAM)\r\n clientSocket.bind(('',clientPort))\r\n clientSocket.sendto(('@'+id).encode(),(serverName,serverPort))\r\n\r\n \r\n \r\n \r\n\r\n\r\n","repo_name":"iDragon-yang/xiaozhiquan","sub_path":"software/UDPClient.py","file_name":"UDPClient.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2802219211","text":"import win32gui, win32ui, win32con\nfrom ctypes import windll\nfrom PIL import Image\nimport cv2\nimport numpy\n\n#获取后台窗口的句柄,注意后台窗口不能最小化\nhWnd = win32gui.FindWindow(0,\"魔兽世界\") #窗口的类名可以用Visual Studio的SPY++工具获取\n#获取句柄窗口的大小信息\nleft, top, right, bot = win32gui.GetWindowRect(hWnd)\nwidth = right - left\nheight = bot - top\n#返回句柄窗口的设备环境,覆盖整个窗口,包括非客户区,标题栏,菜单,边框\nhWndDC = win32gui.GetWindowDC(hWnd)\n#创建设备描述表\nmfcDC = win32ui.CreateDCFromHandle(hWndDC)\n#创建内存设备描述表\nsaveDC = mfcDC.CreateCompatibleDC()\n#创建位图对象准备保存图片\nsaveBitMap = win32ui.CreateBitmap()\n#为bitmap开辟存储空间\nsaveBitMap.CreateCompatibleBitmap(mfcDC,width,height)\n#将截图保存到saveBitMap中\nsaveDC.SelectObject(saveBitMap)\n#保存bitmap到内存设备描述表\nsaveDC.BitBlt((0,0), (width,height), mfcDC, (0, 0), win32con.SRCCOPY)\n\n#如果要截图到打印设备:\n###最后一个int参数:0-保存整个窗口,1-只保存客户区。如果PrintWindow成功函数返回值为1\nresult = windll.user32.PrintWindow(hWnd,saveDC.GetSafeHdc(),0)\nprint(result) #PrintWindow成功则输出1\n\n#保存图像\n##方法一:windows api保存\n###保存bitmap到文件\nsaveBitMap.SaveBitmapFile(saveDC,\"img_Winapi.bmp\")","repo_name":"haluo/wowUp","sub_path":"test/t3.py","file_name":"t3.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20462461864","text":"\"\"\"\nCreated on 2011-11-03\n\n@author: alexandre\n\"\"\"\nfrom __future__ import unicode_literals\nimport cherrypy\nfrom util import read_pickle\nimport htmlView\nfrom os import path\nimport subprocess as sp\nimport os\n\nworkFolder = '/tmp/happy-league'\nmakeSchedulePath = path.join(path.dirname(__file__), '..', \"makeSchedule.py\")\n\ntry:\n os.makedirs(workFolder)\nexcept OSError:\n pass\n\n\nclass ScheduleServer(object):\n def __init__(self, workFolder=None):\n self.workFolder = workFolder\n self.schPath = path.join(workFolder, 'sch.pkl')\n self.subProcess = None\n self.sig = None\n\n @cherrypy.expose\n def index(self):\n menu = \"\"\"\\\n \n \n Happy 
League\n \n \n \n {}\n \n \n \"\"\".format(htmlView.HtmlMenu().__html__())\n return menu\n\n @cherrypy.expose\n def new(self):\n if self.subProcess is not None: # may be running\n if self.subProcess.poll() is None: # is running\n\n return \"\"\"\n \n \n \n \n \n \n {}\n
\n                        <p>Currently running.</p>\n                        <p><a href=\"stop\">stop</a></p>\n                        
\n \n \"\"\".format(htmlView.HtmlMenu().__html__())\n else:\n code = self.subProcess.poll()\n self.subProcess = None\n self.sentSig = None\n return \"\"\"\n last process ended with code %d\n \"\"\" % code\n\n return \"\"\"\n \n \n \n \n \n \n {}\n
\n                <h2>Upload a file</h2>\n                <form action=\"upload\" method=\"post\" enctype=\"multipart/form-data\">\n                <table>\n                <tr><td>Configuration file</td><td><input type=\"file\" name=\"myFile\"/></td></tr>\n                <tr><td>Runtime (minutes)</td><td><input type=\"text\" name=\"minutes\"/></td></tr>\n                </table>\n                <input type=\"submit\"/>\n                </form>\n                
\n \n \"\"\".format(htmlView.HtmlMenu().__html__())\n\n @cherrypy.expose\n def stop(self):\n if self.subProcess is not None: # may be running\n if self.subProcess.poll() is None: # is running\n self.subProcess.terminate()\n self.sentSig = 'SIGTERM'\n return \"Sent the terminate signal.\"\n else:\n code = self.subProcess.poll()\n self.subProcess = None\n self.sentSig = None\n return \"process ended with code %d\" % code\n else:\n return \"no process was running\"\n\n @cherrypy.expose\n def upload(self, myFile, minutes):\n\n print('upload')\n filePath = path.join(self.workFolder, 'xlsConfig.xls')\n with open(filePath, 'wb') as fd:\n while True:\n data = myFile.file.read(8192)\n if not data:\n break\n fd.write(data)\n\n print('file Saved')\n print(' '.join([makeSchedulePath, self.workFolder, minutes]))\n\n self.subProcess = sp.Popen([makeSchedulePath, self.workFolder, minutes], stdout=open(path.join(self.workFolder, 'stdout'), 'w'))\n print('process started')\n return \"\"\"\n \n \n \n \n \n \n {}\n
\n                <p>Started for {} minutes...</p>\n                
\n \n \"\"\".format(htmlView.HtmlMenu().__html__(), minutes)\n\n def _formatStdout(self, name='stdout', **fontAttr):\n strL = []\n strL.append('

<p><b>%s</b></p>

' % name)\n\n file_path = os.path.join(self.workFolder, name)\n if os.path.exists(file_path):\n with open(file_path) as f:\n stdout = f.read()\n stdout = stdout.replace('\\n', '
\\n')\n stdout = stdout.replace(' ', ' ')\n strL.append(htmlView.HtmlTag('tt', htmlView.Font(stdout, **fontAttr)).__html__())\n\n return strL\n\n @cherrypy.expose\n def debug(self):\n return \"\"\"\n \n \n \n \n \n \n {}\n
\n {}\n
\n \n \"\"\".format(htmlView.HtmlMenu().__html__(), '\\n'.join(self._formatStdout() + self._formatStdout('stderr', color='red')))\n\n def _buildAnalysisPage(self, builder, refresh=False):\n if not path.exists(self.schPath):\n return \"no schedule\"\n return \"\"\"\n \n \n \n \n \n \n {}\n
\n                <p>No Schedule.</p>\n                
\n \n \"\"\".format(htmlView.HtmlMenu().__html__())\n\n config, matchL, optState = read_pickle(self.schPath)\n doc = htmlView.HtmlDoc(builder(config, matchL, optState))\n if refresh:\n doc.head.add(htmlView.HtmlRefresh())\n return doc.__html__()\n\n @cherrypy.expose\n def overview(self):\n return self._buildAnalysisPage(htmlView.HtmlAnalysis)\n\n @cherrypy.expose\n def schedule(self):\n return self._buildAnalysisPage(htmlView.HtmlSchedule)\n\n @cherrypy.expose\n def conflicts(self):\n return self._buildAnalysisPage(htmlView.HtmlConflict)\n\n @cherrypy.expose\n def config(self):\n return self._buildAnalysisPage(htmlView.HtmlConfig)\n\n @cherrypy.expose\n def teamGrpConflicts(self):\n return self._buildAnalysisPage(htmlView.HtmlTeamGroup)\n\n @cherrypy.expose\n def uniformity(self):\n return self._buildAnalysisPage(htmlView.HtmlUniformity)\n\n @cherrypy.expose\n def restrictions(self):\n return self._buildAnalysisPage(htmlView.HtmlRestriction)\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__)) + os.path.sep\n\nif __name__ == \"__main__\":\n cherrypy.quickstart(ScheduleServer(workFolder),\n config={\n '/': {\n 'tools.staticdir.root': current_dir,\n },\n '/css': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': 'static/css',\n },\n '/js': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': 'static/js',\n },\n '/fonts': {\n 'tools.staticdir.on': True,\n 'tools.staticdir.dir': 'static/fonts',\n }\n })\n","repo_name":"jeanfrancisroy/happy-league","sub_path":"cherryPyServer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16417215745","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\n\nclass AccountTests(APITestCase):\n\n def test_user_statistic(self):\n \"\"\"\n Ensure we can get user statistic.\n \"\"\"\n url = reverse('user_statistic')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('total_signup', response.data)\n self.assertIn('today_active_session', response.data)\n self.assertIn('average_active_session', response.data)\n\n def test_user_list(self):\n \"\"\"\n Ensure we can get user list.\n \"\"\"\n url = reverse('user-list')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","repo_name":"Krisnadi/mtx-simple-app","sub_path":"user/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12292816110","text":"#-*- coding: utf-8 -*-\nimport xlrd\nimport xlwt\nfrom xlutils.copy import copy\n\nshm_row = 1\n#打开3000路建设进展表\ncamera_xl = xlrd.open_workbook('./二期3000路人脸建设进展.xlsx')\nsheetOrig = camera_xl.sheet_by_name('3000路建设_进度明细表')\ncol_values = sheetOrig.col_values(5)\n\ndic_shm = {0:'quyu', 1:'chengshi', 2:'yinqingdanyuan', 3:'wenhoushi', 4:'yonghuming', 5:'mima', 6:'shexiangtoumingcheng', 7:'shexiangtouIP', 8:'port', 9:'shexiangtouzuobiao', 10:'shebeibianma', 11:'caijileixing', 12:'shipinbofangdizhi', 13:'bendi'}\ndic_shm_val = {}\ndic_shm_val['yinqingdanyuan'] = input(\"请输入引擎单元:\")\n\n#生成摄像机名称一列的字典\ndic_col = {str(i):col_values[i] for i in range(0,len(col_values))}\n\ncamera_file = open('./camera.txt')\nfor line in camera_file:\n\tadd_name = line.replace('\\n', '')\n\tif add_name == 
'':\n\t\tbreak\n\tnumber_name = list(dic_col.keys())[list(dic_col.values()).index(add_name)]\t\n\trow_values = sheetOrig.row_values(int(number_name))\n\t\n\tdic_shm_val['quyu'] = row_values[7]\n\tdic_shm_val['chengshi'] = '青岛'\n\tdic_shm_val['wenhoushi'] = '非问候室'\n\tdic_shm_val['yonghuming'] = 'admin'\n\tdic_shm_val['mima'] = 'qdls1234'\n\tdic_shm_val['shexiangtoumingcheng'] = add_name\n\tdic_shm_val['shexiangtouIP'] = row_values[10]\n\tdic_shm_val['port'] = 8000\n\tdic_shm_val['shexiangtouzuobiao'] = str(row_values[13])+','+str(row_values[14])\n\tdic_shm_val['caijileixing'] = '海康抓拍模式'\n\tdic_shm_val['shipinbofangdizhi'] = 'rtsp://'+dic_shm_val['yonghuming']+':'+dic_shm_val['mima']+'@'+dic_shm_val['shexiangtouIP']\n\tdic_shm_val['shebeibianma'] = ''\n\tdic_shm_val['bendi'] = '是'\n\t\n\t#向深目批量添加摄像头内容\n\tfor shm_num in range(0, 14):\n\t\tworkBook = xlrd.open_workbook('./点位上传表.xlsx')\n\t\tnewWb = copy(workBook)\n\t\tnewWs = newWb.get_sheet(1)\n\t\tnewWs.write(shm_row, shm_num,dic_shm_val[dic_shm[shm_num]])\n\t\tnewWb.save('./点位上传表.xlsx')\n\tshm_row = shm_row + 1\ncamera_file.close()\nprint('添加结束!')\n","repo_name":"password442619/Python_Script","sub_path":"shenmu_Add_camera.py","file_name":"shenmu_Add_camera.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40881461001","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport openpyxl\r\n\r\n#导入数据\r\n# 1.电力用户用电分类\r\n# df=pd.read_csv(\"F:\\\\软件杯\\\\数据\\\\user_log2_count2.csv\")\r\n# x_axis=\"power_2015_average\"\r\n# y_axis=\"power_2016_average\"\r\n\r\n# # 2. 能源分类\r\n# df=pd.read_csv(\"F:\\\\软件杯\\\\数据\\\\household_power_consumption.csv\")\r\n# x_axis=\"Global_active_power\"\r\n# y_axis=\"Global_intensity\"\r\n\r\n# # 3. 
峰值分类\r\n# df=pd.read_csv(\"F:\\\\软件杯\\\\数据\\\\峰值.csv\")\r\n# x_axis=\"average\"\r\n# y_axis=\"average_norm\"\r\n\r\n# #4.电力用户缴费行文分类\r\n# df=pd.read_csv(\"F:\\\\软件杯\\\\数据\\\\user_log_count.csv\")\r\n# x_axis=\"money_sum\"\r\n# y_axis=\"money_average\"\r\n\r\n# #5.激光企业分类\r\n# df=pd.read_csv(\"F:\\\\软件杯\\\\数据\\\\激光企业.csv\")\r\n# x_axis=\"综合倍率\"\r\n# y_axis=\"总用电量\"\r\n\r\n#6.省份GDP电量关系\r\ndf=pd.read_csv(\"F:\\\\软件杯\\\\数据\\\\省份GDP电量关系.csv\")\r\nx_axis=\"近10年年均GDP排行\"\r\ny_axis=\"近10年年均用电量排行\"\r\n\r\nnum_examples=df.shape[0]\r\nx_train=df[[x_axis,y_axis]].values.reshape(num_examples,2)\r\n\r\nfrom sklearn.cluster import KMeans\r\nSSE = [] # 存放每次结果的误差平方和\r\nfor k in range(1, 9):\r\n estimator = KMeans(n_clusters=k) # 构造聚类器\r\n estimator.fit(x_train)\r\n SSE.append(estimator.inertia_)\r\nX = range(1, 9)\r\n\r\nplt.figure(figsize=(15, 10))\r\nplt.xlabel('k')\r\nplt.ylabel('SSE')\r\nplt.plot(X, SSE, 'o-')\r\nplt.show()\r\n","repo_name":"Amber1132518/task-5","sub_path":"k值.py","file_name":"k值.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41047732509","text":"# Problem: https://adventofcode.com/2020/day/6#part2\n\nfrom pathlib import Path\nimport re\n\nINPUT = str(Path(__file__).parent.absolute()) + '/input.txt'\n\nyes_quests = set()\nquests_list = []\nrunning_total = 0\n\nwith open(INPUT) as f:\n for line in f.readlines():\n\n if line.strip() == \"\":\n every_yes = quests_list[0]\n for s in quests_list:\n every_yes = every_yes & s\n\n running_total += len(every_yes)\n quests_list = []\n else:\n for item in list(line.strip()):\n yes_quests.add(item) \n\n quests_list.append(yes_quests)\n yes_quests = set()\n \n running_total += len(yes_quests)\n\nprint(running_total)\n \n# Result: 3628\n\n","repo_name":"gdep/AoC_2020","sub_path":"Python/Day06/Day6_02.py","file_name":"Day6_02.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25124253299","text":"# -*- coding: utf8 -*-\r\n\r\nimport requests, os, sys, json\r\n\r\nsys.path.append(\".\")\r\n\r\nimport logging\r\n\r\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\r\nlogger = logging.getLogger(__name__)\r\ntry:\r\n from pusher import pusher\r\nexcept:\r\n\r\n def pusher(*args):\r\n pass\r\n\r\n\r\ntry:\r\n from notify import send as pusher\r\nexcept:\r\n logger.info(\"无青龙推送文件\")\r\n\r\ncookie = os.environ.get(\"cookie_pt\")\r\npt_website = os.environ.get(\"pt_website\")\r\nproxy_url_http = os.environ.get(\"proxy_url_http\")\r\nproxy_url_https = os.environ.get(\"proxy_url_https\")\r\nif proxy_url_http and proxy_url_https:\r\n proxies = {\"http\": proxy_url_http, \"https\": proxy_url_https}\r\nelse:\r\n proxies = None\r\n\r\n\r\ndef main(cookie, website):\r\n s = requests.Session()\r\n if os.path.exists(\"./ptconfig.json\"):\r\n with open(\"ptconfig.json\", \"r\", encoding=\"utf8\") as f:\r\n data = json.load(f)\r\n try:\r\n vote_id = data[website][\"vote_id\"]\r\n if vote_id == \"disable\":\r\n return f\"{website} 不需要投票\\n\"\r\n except:\r\n try:\r\n data[website][\"vote_id\"] = \"disable\"\r\n except:\r\n data[website] = {}\r\n data[website][\"vote_id\"] = \"disable\"\r\n with open(\"./ptconfig.json\", \"w\", encoding=\"utf8\") as f:\r\n json.dump(data, f, ensure_ascii=False)\r\n return f\"{website} 投票初始化,自行修改起始投票id才会开始投票\\n\"\r\n else:\r\n try:\r\n data = {}\r\n data[website] = {}\r\n data[website][\"vote_id\"] = \"disable\"\r\n 
with open(\"./ptconfig.json\", \"w\", encoding=\"utf8\") as f:\r\n json.dump(data, f, ensure_ascii=False)\r\n return f\"{website} 投票初始化,自行修改起始投票id才会开始投票\\n\"\r\n except:\r\n msg = \"warning::: ptVote.py无法写入ptconfig.json,请使用有读写权限的环境运行脚本\\n\"\r\n logger.info(msg)\r\n pusher(\"Checkinbox通知\", msg)\r\n return msg\r\n url = f'{website.replace(\"index.php\", \"fun.php\")}?action=vote&id={vote_id}&yourvote=fun'\r\n\r\n headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36Safari/537.36\",\r\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7\",\r\n \"Cookie\": cookie,\r\n \"Referer\": website,\r\n }\r\n\r\n r = s.get(url, headers=headers, proxies=proxies, verify=False)\r\n data[website][\"vote_id\"] = int(vote_id) + 1\r\n if not r.text:\r\n msg = \"趣味盒投票有趣\"\r\n with open(\"./ptconfig.json\", \"w\", encoding=\"utf8\") as f:\r\n json.dump(data, f, ensure_ascii=False)\r\n elif \"你已经投过票了!\" in r.text:\r\n msg = \"你已经投过票了!\"\r\n with open(\"./ptconfig.json\", \"w\", encoding=\"utf8\") as f:\r\n json.dump(data, f, ensure_ascii=False)\r\n elif \"无效的ID\" in r.text:\r\n msg = \"无效的ID\"\r\n else:\r\n msg = \"cookie失效\"\r\n pusher(\"Checkinbox通知\", f\"PT站点{website} Cookie过期\\n{r.text[:200]}\")\r\n return msg + \"\\n\"\r\n\r\n\r\ndef main_handler(*args):\r\n msg = \"\"\r\n global cookie, pt_website\r\n if \"\\\\n\" in cookie:\r\n clist = cookie.split(\"\\\\n\")\r\n weblist = pt_website.split(\"\\\\n\")\r\n else:\r\n clist = cookie.split(\"\\n\")\r\n weblist = pt_website.split(\"\\n\")\r\n i = 0\r\n while i < len(clist):\r\n msg += f\"第 {i+1} 个网站开始执行任务\\n\"\r\n cookie = clist[i]\r\n website = weblist[i]\r\n msg += main(cookie, website)\r\n i += 1\r\n return msg[:-1]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if cookie:\r\n logger.info(\"----------PTwebsite_Vote开始尝试签到----------\")\r\n logger.info(main_handler())\r\n logger.info(\"----------PTwebsite_Vote签到执行完毕----------\")\r\n","repo_name":"mengshouer/ToolBox","sub_path":"CheckinBox/ptWebsite/ptVote.py","file_name":"ptVote.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"73100185653","text":"import scrapy\nimport re\n\nclass ParariusScraper(scrapy.Spider):\n name = 'Pararius'\n pararius_prefix = 'https://www.pararius.nl'\n\n def start_requests(self):\n urls = [\n 'https://www.pararius.nl/koopwoningen/nederland'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response, **kwargs):\n for list_item in response.css('li.search-list__item--listing'):\n url = list_item.css('a.listing-search-item__link--depiction::attr(href)').get()\n id_regex = re.search(\"/.*?/.*?/(.*?)/\", url)\n\n if url is not None:\n realtor_div = response.css('div.listing-search-item__info')\n\n listItem = {\n 'id': id_regex.group(1),\n 'url': self.pararius_prefix + url,\n 'address': list_item.css('div.listing-search-item__location::text').get(),\n 'price': list_item.css('span.listing-search-item__price::text').get(),\n 'realtor_name': realtor_div.css('a.listing-search-item__link::text').get(),\n 'realtor_url': self.pararius_prefix + realtor_div.css('a.listing-search-item__link::attr(href)').get(),\n }\n\n yield scrapy.Request(\n url=self.pararius_prefix + url,\n callback=self.parse_detail_page,\n meta={'item': listItem}\n )\n\n next_page = response.css('a.pagination__link--next::attr(href)').get()\n if next_page is not None:\n yield 
response.follow(self.pararius_prefix + next_page, self.parse)\n\n def parse_detail_page(self, response):\n item = response.meta.get('item', {})\n item['building'] = {}\n item['building']['images'] = {}\n for num, picture_div in enumerate(response.css('div.picture--media-carrousel')):\n item['building']['images'][str(num)] = picture_div.css('img.picture__image::attr(src)').get()\n\n offered_since_dd = response.css('dd.listing-features__description--offered_since')\n item['building']['since'] = offered_since_dd.css('span::text').get()\n\n status_dd = response.css('dd.listing-features__description--status')\n item['building']['status'] = status_dd.css('span::text').get()\n\n surface_dd = response.css('dd.listing-features__description--surface_area')\n item['building']['surface_area'] = surface_dd.css('span::text').get()\n\n volume_dd = response.css('dd.listing-features__description--volume')\n item['building']['volume'] = volume_dd.css('span::text').get()\n\n kvk_contribution_dd = response.css('dd.listing-features__description--monthly_contribution')\n item['building']['kvk_contribution'] = kvk_contribution_dd.css('span::text').get()\n\n kvk_reserve_funds_dd = response.css('dd.listing-features__description--reserve_fund')\n item['building']['kvk_reserve_funds'] = kvk_reserve_funds_dd.css('span::text').get()\n\n dwelling_type_dd = response.css('dd.listing-features__description--dwelling_type')\n item['building']['house_type'] = dwelling_type_dd.css('span::text').get()\n\n property_type_dd = response.css('dd.listing-features__description--property_types')\n item['building']['property_type'] = property_type_dd.css('span::text').get()\n\n construction_type_dd = response.css('dd.listing-features__description--construction_type')\n item['building']['construction_type'] = construction_type_dd.css('span::text').get()\n\n build_year_dd = response.css('dd.listing-features__description--construction_period')\n item['building']['construction_period'] = build_year_dd.css('span::text').get()\n\n number_of_rooms = response.css('dd.listing-features__description--number_of_rooms')\n item['building']['rooms'] = number_of_rooms.css('span::text').get()\n\n number_of_bedrooms = response.css('dd.listing-features__description--number_of_bedrooms')\n item['building']['bedrooms'] = number_of_bedrooms.css('span::text').get()\n\n number_of_bathrooms = response.css('dd.listing-features__description--number_of_bathrooms')\n item['building']['bathrooms'] = number_of_bathrooms.css('span::text').get()\n\n number_of_floors = response.css('dd.listing-features__description--number_of_floors')\n item['building']['floors'] = number_of_floors.css('span::text').get()\n\n facilities = response.css('dd.listing-features__description--facilities')\n item['building']['facilities'] = facilities.css('span::text').get()\n\n location = response.css('dd.listing-features__description--situations')\n item['building']['location'] = location.css('span::text').get()\n\n balcony = response.css('dd.listing-features__description--balcony')\n item['building']['balcony'] = balcony.css('span::text').get()\n\n garden = response.css('dd.listing-features__description--garden')\n item['building']['garden'] = garden.css('span::text').get()\n\n insulations = response.css('dd.listing-features__description--insulations')\n item['building']['insulations'] = insulations.css('span::text').get()\n\n heatings = response.css('dd.listing-features__description--heatings')\n item['building']['heatings'] = heatings.css('span::text').get()\n\n water_heatings = 
response.css('dd.listing-features__description--water_heatings')\n item['building']['water_heatings'] = water_heatings.css('span::text').get()\n\n boiler = response.css('dd.listing-features__description--heating_boiler')\n item['building']['boiler'] = boiler.css('span::text').get()\n\n storage = response.css('dd.listing-features__description--storage')\n item['building']['storage'] = storage.css('span::text').get()\n\n storage_description = response.css('dd.listing-features__description--description')\n item['building']['storage_description'] = storage_description.css('span::text').get()\n\n parking = response.css('dd.listing-features__description--parking')\n item['building']['parking'] = parking.css('span::text').get()\n\n garage = response.css('dd.listing-features__description--available')\n item['building']['has_garage'] = garage.css('span::text').get()\n\n return item\n","repo_name":"zoidboi/house-scraping-platform","sub_path":"pararius/pararius/spiders/pararius_spider.py","file_name":"pararius_spider.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19450750090","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('zoom_vols', '0017_auto_20180119_0913'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='hours',\n name='volunteer_activity',\n field=models.CharField(null=True, max_length=30, blank=True),\n ),\n migrations.AlterField(\n model_name='volunteer',\n name='contact_pref',\n field=models.CharField(null=True, max_length=16, choices=[('Call', 'CALL'), ('Text', 'TEXT'), ('Email', 'EMAIL'), ('Mail', 'MAIL'), ('Facebook', 'FACEBOOK'), ('Do Not Contact', 'DO_NOT_CONTACT')], blank=True),\n ),\n migrations.AlterField(\n model_name='volunteergroup',\n name='volunteer_activity',\n field=models.CharField(null=True, max_length=30, blank=True),\n ),\n migrations.DeleteModel(\n name='Activity',\n ),\n ]\n","repo_name":"5klynna5/zoom_c","sub_path":"zoom_vols/migrations/0018_auto_20180119_0953.py","file_name":"0018_auto_20180119_0953.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5414066556","text":"import os\nimport csv\nimport cv2\nimport numpy as np\nfrom random import shuffle\nimport sklearn\n\ndef add_sample_from_file(filename,samples):\n\twith open('../driving_log_' + postfix + '.csv') as csvfile:\n\t\treader = csv.reader(csvfile)\n\t\tfor line in reader:\n\t\t\tif(len(line) > 1):\n\t\t\t\tline[0] = line[0].replace(\"IMG\",\"IMG_\" + postfix)\n\t\t\t\tsamples.append(line)\n\treturn samples\n\nsamples = []\nsamples = add_sample_from_file('../driving_log_210918.csv',samples)\nsamples = add_sample_from_file('../driving_log_curvy1.csv',samples)\nsamples = add_sample_from_file('../driving_log_curvy2.csv',samples)\nsamples = add_sample_from_file('../driving_log_recovery1.csv',samples)\nsamples = add_sample_from_file('../driving_log_300918.csv',samples)\nsamples = add_sample_from_file('../driving_log_curve021018.csv',samples)\nsamples = add_sample_from_file('../driving_log_curve031018.csv',samples)\n\nfrom sklearn.model_selection import train_test_split\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\ndef generator(samples, batch_size=32):\n\tnum_samples = len(samples)\n\twhile 1: # Loop forever so the 
generator never terminates\n\t\tshuffle(samples)\n\t\tfor offset in range(0, num_samples, batch_size):\n\t\t\tbatch_samples = samples[offset:offset+batch_size]\n\t\t\t\n\t\t\timages = []\n\t\t\tangles = []\n\t\t\tfor batch_sample in batch_samples:\n\t\t\t\tname = batch_sample[0]\n\t\t\t\tcenter_image = cv2.imread(name)\n\t\t\t\tcenter_angle = float(batch_sample[3])\n\t\t\t\timages.append(center_image)\n\t\t\t\tangles.append(center_angle)\n\t\t\t\timage_flipped = np.fliplr(center_image)\n\t\t\t\tmeasurement_flipped = -center_angle\n\t\t\t\timages.append(image_flipped)\n\t\t\t\tangles.append(measurement_flipped)\n\t\t\t\n\t\t\t# trim image to only see section with road\n\t\t\tX_train = np.array(images)\n\t\t\ty_train = np.array(angles)\n\t\t\tyield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=64)\nvalidation_generator = generator(validation_samples, batch_size=64)\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Dropout\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((50,20), (0,0))))\nmodel.add(MaxPooling2D())\nmodel.add(Convolution2D(24,(5,5),activation='relu',padding='valid'))\nmodel.add(Convolution2D(36,(5,5),activation='relu',padding='valid'))\nmodel.add(Convolution2D(48,(5,5),activation='relu',padding='valid'))\nmodel.add(MaxPooling2D())\nmodel.add(Convolution2D(64,(3,3),activation='relu',padding='valid'))\nmodel.add(Convolution2D(64,(3,3),activation='relu',padding='valid'))\nmodel.add(MaxPooling2D())\n#model.add(Dropout(0.5))\nmodel.add(Flatten())\nmodel.add(Dense(1164, activation='relu'))\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit_generator(train_generator, samples_per_epoch= len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=3)\n\nmodel.save('model.h5')","repo_name":"derzaarsad/behavioral_cloning","sub_path":"behaviour-cloning.py","file_name":"behaviour-cloning.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19453690263","text":"bl_info = {\r\n \"name\": \"Raytracing Vehicle\",\r\n \"author\": \"meeww\",\r\n \"version\": (1, 0),\r\n \"blender\": (2, 80, 0),\r\n \"location\": \"3D Viewport > Scene > Raytracing AI\",\r\n \"description\": \"Adds a raytracing ai.\",\r\n \"warning\": \"\",\r\n \"doc_url\": \"\",\r\n \"category\": \"Add Raytracing AI\",\r\n}\r\nimport bpy\r\nimport math\r\n\r\n\r\nclass Globals():\r\n running = 0;\r\n stopping = 0;\r\n debug = 0;\r\nclass Properties(bpy.types.PropertyGroup):\r\n max_speed : bpy.props.FloatProperty(default = 1,name = \"Max Speed\",soft_min = 0, soft_max = 90); \r\n max_acceleration : bpy.props.FloatProperty(default = 1,name = \"Max Acceleration\",soft_min = 0, soft_max = 90); \r\n steering_force : bpy.props.FloatProperty(default = 1,name = \"Steering Force\",soft_min = 0, soft_max = 1); \r\n\r\ndef runButton(context,mytool):\r\n C = bpy.context \r\n \r\n # custom property from Object \"RUN PROGRAM\"\r\n def runProgram(scene):\r\n running = Globals.running\r\n stopping = 
Globals.stopping;\r\n if running or stopping: # run program if custom property run = 1\r\n\r\n rayCollection = bpy.data.collections['Controllers']; #raycast controller object\r\n \r\n for colls in rayCollection.children: #run for every car\r\n \r\n for obj in colls.children: \r\n \r\n if 'Car Constraints' in obj.name: #locate constraints collection\r\n \r\n for constraints in obj.children:\r\n \r\n if 'Motors' in constraints.name: #locate motors collection\r\n for motors in constraints.objects:\r\n if 'SteeringMotor' in motors.name:\r\n steering = motors; #get motor constraints\r\n if 'FL_Motor' in motors.name:\r\n motorFL = motors;\r\n if 'FR_Motor' in motors.name:\r\n motorFR = motors; \r\n if 'RL_Motor' in motors.name:\r\n motorRL = motors;\r\n if 'RR_Motor' in motors.name:\r\n motorRR = motors; \r\n \r\n for obj in colls.objects: \r\n if 'raycast_Controller' in obj.name:\r\n memoryObject = C.view_layer.objects.active #depsgraph requires setting an object to active\r\n C.view_layer.objects.active = obj; #so we save the current selected object to memory \r\n #so that we can reset it back afterwards.\r\n \r\n deps = obj.evaluated_get(C.evaluated_depsgraph_get()) #depsgraph gets the active object's custom properties\r\n detector = deps.data #save data to variable\r\n \r\n C.view_layer.objects.active = memoryObject; #reset active selected object\r\n \r\n \r\n #get sensor values\r\n hitL= detector.attributes['hit_L'].data[0].value #retieve each custom property by name\r\n hitR= detector.attributes['hit_R'].data[0].value #geometry nodes adds custom properties to all vertices,\r\n hitF= detector.attributes['hit_F'].data[0].value #but we only need it once so we select the 0th vertex\r\n hitD= detector.attributes['hit_D'].data[0].value #and take it's properties. 
It's inefficient but it works.\r\n velocity= detector.attributes['velocity'].data[0].value\r\n steeringForce = detector.attributes['steeringForce'].data[0].value\r\n isRigid= detector.attributes['isRigidBody'].data[0].value\r\n \r\n \r\n if hitL == 0: # prevent infinite length rays being counted as hit at 0\r\n hitL = 250;\r\n if hitR == 0:\r\n hitR = 250;\r\n if hitF ==0:\r\n hitF = 250;\r\n if hitD == 0:\r\n hitD = 250;\r\n \r\n if Globals.debug ==1: #print raycast info if \"RUN PROGRAM\"'s custom\r\n print(obj.name + \" -\")\r\n print(\" Left raycast is : \" + str(hitL)) #property \"debug\" is equal to 1.\r\n print(\" Right raycast is : \" + str(hitR))\r\n print(\" Front raycast is : \" + str(hitF))\r\n print(\" Down raycast is : \" + str(hitD))\r\n print(\" \") \r\n print(isRigid);\r\n if isRigid == 0:\r\n if running ==1:\r\n print((2/(hitL*hitL)-2/(hitR*hitR)) *steeringForce*mytool.steering_force)\r\n\r\n steering.rigid_body_constraint.motor_ang_target_velocity=(2/(hitL*hitL)-2/(hitR*hitR)) *steeringForce*mytool.steering_force;\r\n if hitD: \r\n \r\n brake = 1-stopping;\r\n #current car setup is RWD\r\n motorFL.rigid_body_constraint.motor_ang_target_velocity = 0; \r\n motorFL.rigid_body_constraint.motor_ang_max_impulse = brake; \r\n motorFR.rigid_body_constraint.motor_ang_target_velocity = 0;\r\n motorFR.rigid_body_constraint.motor_ang_max_impulse = brake;\r\n \r\n #RWD Motors\r\n motorRL.rigid_body_constraint.motor_ang_target_velocity = (hitF-0.5)*100*mytool.max_speed;\r\n motorRL.rigid_body_constraint.motor_ang_max_impulse = velocity*200*brake*mytool.max_acceleration; \r\n motorRR.rigid_body_constraint.motor_ang_target_velocity = (hitF-0.5)*100*mytool.max_speed;\r\n motorRR.rigid_body_constraint.motor_ang_max_impulse = velocity*200*brake*mytool.max_acceleration;\r\n if stopping:\r\n Globals.stopping = 0;\r\n else:\r\n print(\"1\")\r\n if Globals.running ==1:\r\n print(\"2\")\r\n obj.rotation_euler[2]-=(1/(hitL*hitL)-1/ (hitR*hitR))*0.005 * steeringForce*mytool.steering_force\r\n angle = obj.rotation_euler[2];\r\n if hitD:\r\n print(\"3\")\r\n obj.location[0]-=math.sin(angle)*velocity*mytool.max_speed;\r\n obj.location[1]+=math.cos(angle)*velocity*mytool.max_speed;\r\n else:\r\n \r\n bpy.app.handlers.frame_change_pre.remove(runProgram)\r\n\r\n \r\n print(\"Raytracing-AI has started.\") # register program if \"RUN PROGRAM\"'s custom property \"run\" is equal to 1 \r\n bpy.app.handlers.frame_change_pre.append(runProgram)\r\n\r\n\r\n\r\n\r\ndef stopButton(context):\r\n if Globals.running == 1:\r\n Globals.running = 0;\r\n Globals.stopping = 1;\r\n print(\"Raytracing-AI has been stopped.\")\r\n bpy.context.scene.frame_current+=1;\r\n bpy.context.scene.frame_current-=1;\r\n \r\n else:\r\n print(\"Raytracing-AI has already been stopped.\")\r\n\r\ndef resetButton(context):\r\n bpy.context.scene.frame_current=0;\r\n for obj in bpy.context.scene.objects:\r\n if \"raycast_Controller\" in obj.name:\r\n obj.location[0] = 0;\r\n obj.location[1] = 0;\r\n obj.location[2] = 0;\r\n obj.rotation_euler[0] = 0;\r\n obj.rotation_euler[1] = 0;\r\n obj.rotation_euler[2] = math.pi/2;\r\n\r\ndef debugButton(context):\r\n if Globals.debug == 0:\r\n Globals.debug = 1;\r\n print(\"Raytracing-AI will now output debug information.\")\r\n else:\r\n Globals.debug = 0;\r\n print(\"Raytracing-AI will no longer output debug information.\") \r\n \r\n\r\n\r\nclass Run_AI(bpy.types.Operator):\r\n \"\"\"Tooltip\"\"\"\r\n bl_idname = \"raytracer.run\"\r\n bl_label = \"Toggle Run\"\r\n\r\n\r\n def execute(self, 
context):\r\n if Globals.running == 0:\r\n Globals.running = 1;\r\n Globals.stopping = 0;\r\n runButton(context,bpy.context.scene.my_tool);\r\n \r\n elif Globals.running == 1:\r\n Globals.running = 0;\r\n Globals.stopping = 1;\r\n stopButton(context);\r\n \r\n return {'FINISHED'}\r\n\r\nclass Stop_AI(bpy.types.Operator):\r\n \"\"\"Tooltip\"\"\"\r\n bl_idname = \"raytracer.stop\"\r\n bl_label = \"Force Stop\"\r\n \r\n \r\n\r\n def execute(self, context):\r\n stopButton(context)\r\n return {'FINISHED'}\r\n\r\nclass Reset_AI(bpy.types.Operator):\r\n \"\"\"Tooltip\"\"\"\r\n bl_idname = \"raytracer.reset\"\r\n bl_label = \"Reset\"\r\n \r\n def execute(self,context):\r\n resetButton(context);\r\n return {'FINISHED'} \r\n\r\nclass Debug_AI(bpy.types.Operator):\r\n \"\"\"Tooltip\"\"\"\r\n bl_idname = \"raytracer.debug\"\r\n bl_label = \"Debug\"\r\n \r\n def execute(self,context):\r\n debugButton(context);\r\n return {'FINISHED'}\r\n\r\n \r\n\r\n\r\nclass LayoutDemoPanel(bpy.types.Panel):\r\n bl_label = \"Raytracing AI\"\r\n bl_idname = \"SCENE_PT_layout\"\r\n bl_space_type = 'PROPERTIES'\r\n bl_region_type = 'WINDOW'\r\n bl_context = \"scene\"\r\n\r\n def draw(self, context):\r\n layout = self.layout\r\n\r\n scene = context.scene\r\n\r\n\r\n\r\n\r\n # Different sizes in a row\r\n if Globals.running == 1:\r\n layout.label(text=\"The script is now running.\")\r\n elif Globals.running == 0:\r\n layout.label(text=\"The script is not currently running.\")\r\n \r\n row = layout.row(align=True)\r\n sub = row.row()\r\n sub.scale_x = 1.0\r\n sub.operator(\"raytracer.run\")\r\n sub.operator(\"raytracer.reset\")\r\n \r\n\r\n row = layout.row()\r\n row.operator(\"raytracer.debug\")\r\n row.scale_x=2\r\n if Globals.debug ==1: \r\n row.label(text=\"Outputting debug info to console.\")\r\n elif Globals.debug ==0:\r\n row.label(text=\"Not outputting debug info\")\r\n \r\n row = layout.row()\r\n row.operator('raytracer.stop')\r\n mytool = scene.my_tool\r\n row = layout.row()\r\n row.prop(mytool,\"max_speed\");\r\n row.prop(mytool,\"max_acceleration\");\r\n row=layout.row();\r\n row.prop(mytool,\"steering_force\");\r\n\r\ndef register():\r\n \r\n\r\n \r\n bpy.utils.register_class(Properties)\r\n bpy.types.Scene.my_tool = bpy.props.PointerProperty(type=Properties);\r\n bpy.utils.register_class(Run_AI)\r\n bpy.utils.register_class(Reset_AI)\r\n bpy.utils.register_class(Stop_AI)\r\n bpy.utils.register_class(Debug_AI)\r\n bpy.utils.register_class(LayoutDemoPanel)\r\n\r\ndef unregister():\r\n bpy.utils.unregister_class(LayoutDemoPanel)\r\n bpy.utils.unregister_class(Run_AI)\r\n bpy.utils.unregister_class(Reset_AI)\r\n bpy.utils.unregister_class(Stop_AI)\r\n bpy.utils.unregister_class(Debug_AI)\r\n \r\n bpy.utils.unregister_class(Properties)\r\n del bpy.types.Scene.my_tool;\r\n\r\n\r\nif __name__ == \"__main__\":\r\n register()\r\n","repo_name":"meeww/Blender-Raycasting-Agent","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":12125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8093220473","text":"import os\nimport json\nimport logging\nimport csv\nfrom dotenv import dotenv_values\n\nfrom crouler.kinopoisk_crouler import get_main_page_metadata, get_category_films, get_film_description\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\")\n\nconfig = dotenv_values(\".env\")\nmain_page_link = config.get(\"SEARCH_PAGE_LINK\")\nmain_link = config.get(\"MAIN_LINK\")\nmetadata_file_name = 
config.get(\"MAIN_PAGE_METADATA_FILE_NAME\")\nstorage_name = config.get(\"STORAGE_NAME\")\nfilm_dataset_file_name = config.get(\"DATASET_FILE_NAME\")\n\nmetadata_full_file_name = storage_name + '/' + metadata_file_name\n\n\ndef is_file_exists(file_name) -> bool:\n return os.path.isfile(file_name)\n\n\ndef create_storage_if_not_exist(directory_name):\n if not os.path.exists(directory_name):\n os.makedirs(directory_name)\n\n\ndef load_main_page_metadata_from_file(file_name) -> [{}]:\n with open(file_name, 'r', encoding='utf8') as json_file:\n return json.load(json_file)\n\n\ndef save_metadata_to_file(file_name, main_page_metadata: list[{}]):\n logging.info(f\"Сохраняем полученный результат в файл - {metadata_file_name}\")\n with open(file_name, 'w', encoding='utf8') as file:\n file.write(json.dumps(main_page_metadata, ensure_ascii=False))\n\n\ndef load_film_categories(main_page_metadata) -> [{}]:\n category_len = len(main_page_metadata)\n\n if category_len > 0:\n logging.info(f\"Получаем данные по категориям фильмов ... всего категорий {category_len}\")\n for category in main_page_metadata:\n for category_name, category_metadat in category.items():\n logging.info(f\"Получаем данные для категории - {category_name}\")\n\n category_link = main_link + '/' + category_metadat['href']\n films = get_category_films(category_link)\n category[category_name]['films'] = films\n\n save_metadata_to_file(metadata_full_file_name, main_page_metadata)\n\n return main_page_metadata\n\n\ndef is_film_category_loaded(main_page_metadata) -> bool:\n logging.info(f\"Проверяем фильмы по категориям ... \")\n\n result = True\n\n for category in main_page_metadata:\n for category_name, category_metadat in category.items():\n films = category_metadat['films']\n if films is not None and len(films) > 0:\n for film in films:\n name = film['name']\n link = film['link']\n if (name is None and name == '') and (link is None or link == ''):\n result = True\n break\n else:\n result = False\n\n return result\n\n\ndef load_dataset_to_file(main_page_metadata):\n dataset_file_name = storage_name + '/' + film_dataset_file_name\n\n if is_file_exists(dataset_file_name):\n os.remove(dataset_file_name)\n\n dataset_header = ['name', 'link', 'year', 'country', 'rating', 'scoreCount', 'category', 'description']\n\n with open(storage_name + '/' + film_dataset_file_name, 'w', encoding='UTF8') as f:\n writer = csv.writer(f)\n writer.writerow(dataset_header)\n\n for category in main_page_metadata:\n for category_name, category_metadat in category.items():\n for film in category_metadat['films']:\n logging.info(f\"Получаем описание фильма - {film['name']}\")\n\n film['category'] = category_name\n film['description'] = get_film_description(main_link + film['link'])\n\n writer.writerow([\n film['name'],\n film['link'],\n film['year'],\n film['country'],\n film['rating'],\n film['scoreCount'],\n film['category'],\n film['description']\n ])\n\n f.flush()\n\n\ndef main():\n logging.info(f\"Я запустился и начал работу ... 
создам хранилище если его еще нету - {storage_name}\")\n create_storage_if_not_exist(storage_name)\n\n if not is_file_exists(metadata_full_file_name):\n logging.info(f\"Получаем метаданные главной страницы Кинопоиска - {main_page_link}\")\n main_page_metadata = get_main_page_metadata(main_page_link)\n\n save_metadata_to_file(metadata_full_file_name, main_page_metadata)\n else:\n logging.info(\n f\"Метаданные главной страницы Кинопоиска {main_page_link} были загружены ранее - {metadata_full_file_name}\")\n\n logging.info(f\"Считаем данные из файла ... \")\n main_page_metadata = load_main_page_metadata_from_file(metadata_full_file_name)\n\n if not is_film_category_loaded(main_page_metadata):\n main_page_metadata = load_film_categories(main_page_metadata)\n\n load_dataset_to_file(main_page_metadata)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"avetall87/otus-nlp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12593057765","text":"import RPi.GPIO as GPIO\nimport time\n\n#zu dieser Farbe soll es wechseln\n# ACHTUNG\n# sollte zur 3. Farbe wechseln\n# funktioniert nur einmal, dannach musst du mich mal anrufen, bzw. eigentlich nur die color_before=2 setzten und die color=0\n# Alle Motoren müssen in der richtigen Position sein, insbesondere der m5 muss oben sein (GPIO.input(m5_oben, GPIO.HIGH))\n# \ncolor = 2\n\n#Define Pins\nGPIO.setmode(GPIO.BOARD)\nm5_right = 24\nm5_left = 30\nm5_oben = 26\nGPIO.setup(m5_right, GPIO.OUT)\nGPIO.setup(m5_left, GPIO.OUT)\nGPIO.setup(s5_oben, GPIO.IN)\n\n#Global var's\ncolor_before = 0\n\n#Functions\ndef MoveHead_Right(count):\n for x in range(count):\n GPIO.output(m5_right, GPIO.HIGH)\n time.sleep(0.2)\n while(GPIO.input(m5_oben, GPIO.LOW)):\n GPIO.output(m5_right, GPIO.HIGH)\n GPIO.output(m5_right, GPIO.LOW)\n time.sleep(0.7)\n\ndef MoveHead_Left(count):\n for x in range(count):\n GPIO.output(m5_left, GPIO.HIGH)\n time.sleep(0.2)\n while(GPIO.input(m5_oben, GPIO.LOW)):\n GPIO.output(m5_left, GPIO.HIGH)\n GPIO.output(m5_left, GPIO.LOW)\n time.sleep(0.7)\n\ndef changeColor(color):\n color_idx = int(color)\n diff = color_idx - color_before\n if(diff > 0):\n MoveHead_Left(diff)\n if(diff < 0):\n diff = -diff\n MoveHead_Right(diff)\n color_before = color_idx\n\nchangeColor(color)\n\nGPIO.cleanup()\n","repo_name":"peterlusticus/Rembr","sub_path":"old/test/color-test.py","file_name":"color-test.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"16270053823","text":"lenguajes = [\"phyton\", \"ruby\", \"php\", \"sql\", \"java\"]\r\n\r\ni = 1\r\nwhile i <= 5:\r\n print(i)\r\n i = i + 1\r\n\r\ni = 1\r\nwhile i <= 5:\r\n print(i * \"el weta \")\r\n i = i + 1\r\n\r\ni = 0 \r\nwhile i < len(lenguajes):\r\n print(lenguajes[i])\r\n i = i + 1\r\n \r\n #a continuacion ejecutare un codigo para crear un boton de pago\r\n ","repo_name":"RicardoLpz/Python-Project","sub_path":"12-while.py","file_name":"12-while.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4207563894","text":"class Solution(object):\n def threeSumSmaller(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n nums = sorted(nums)\n \n # trival base case\n if len(nums) < 3:\n return 0\n \n count = 0\n \n for i in 
range(len(nums)-2):\n # set left and right pointers\n left = i + 1\n right = len(nums) - 1\n \n # loop until left and right are next to each other\n while left < right:\n # move left pointer to the right if the sum is less than target\n # move right pointer to the left if the sum is greater than target\n # counter increments by (right - left) because if nums[i] + nums[left] + nums[right] < target,\n # then we know all of nums[i] + nums[left] + nums[x] is less than target where x is between left and right\n if nums[i] + nums[left] + nums[right] < target:\n count += right - left\n left += 1\n else:\n right -= 1\n \n return count\n ","repo_name":"vincehientran/Leetcode","sub_path":"Companies/Citadel/Medium/3SumSmaller.py","file_name":"3SumSmaller.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6504579193","text":"import os\nimport json\n\nconfig = None\n\n\ndef read_config():\n global config\n script_path = os.path.abspath(__file__)\n script_dir = os.path.split(script_path)[0]\n\n with open(str(os.path.join(script_dir, 'default.json'))) as default_data:\n config = json.load(default_data)\n\n with open(str(os.path.join(script_dir, 'config.json'))) as config_data:\n config.update(json.load(config_data))\n\n\ndef get_config():\n global config\n if not config:\n read_config()\n return config\n","repo_name":"epu-ntua/sphinx-vaaas-api","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42680134795","text":"from django.test import TestCase\r\n\r\nfrom ..models import Group, Post, User\r\nfrom ..models import FIRST_CHAR\r\n\r\nUSER_NAME = 'testuser'\r\nGROUP_NAME = 'Тестовая група'\r\nGROUP_SLUG = 'test-slug'\r\nGROUP_DESCRIPTION = 'Тестовое описание'\r\nPOST_TEXT = 'Тестовый пост'\r\n\r\n\r\nclass ModelTest(TestCase):\r\n @classmethod\r\n def setUpClass(cls):\r\n super().setUpClass()\r\n cls.user = User.objects.create_user(username=USER_NAME)\r\n cls.group = Group.objects.create(\r\n title=GROUP_NAME,\r\n slug=GROUP_SLUG,\r\n description=GROUP_DESCRIPTION,\r\n )\r\n cls.post = Post.objects.create(\r\n author=cls.user,\r\n text=POST_TEXT,\r\n group=cls.group,\r\n )\r\n\r\n def test_models_have_correct_object_names(self):\r\n \"\"\"Проверяем, что у моделей корректно работает __str__.\"\"\"\r\n post = self.post\r\n group = self.group\r\n field_str = {\r\n post.text[:FIRST_CHAR]: str(post),\r\n group.title: str(group),\r\n }\r\n for correct_object_name, expected_value in field_str.items():\r\n with self.subTest(correct_object_name=correct_object_name):\r\n self.assertEqual(correct_object_name, expected_value)\r\n\r\n def test_post_have_correct_help_text(self):\r\n \"\"\"Проверяем, что у моделb Post корректно работает help_text.\"\"\"\r\n post = self.post\r\n field_help_text = {\r\n 'text': 'Основной текст поста',\r\n 'pub_date': 'Дата когда был создан пост',\r\n 'group': 'Группа к которой будет относится пост',\r\n 'author': 'Автор данного поста',\r\n }\r\n for field, expected_value in field_help_text.items():\r\n with self.subTest(field=field):\r\n self.assertEqual(\r\n post._meta.get_field(field).help_text, expected_value)\r\n\r\n def test_post_have_correct_verbose_name(self):\r\n \"\"\"Проверяем, что у моделb Post корректно работает verbose_name.\"\"\"\r\n post = self.post\r\n field_help_text = {\r\n 'text': 'Содержание поста',\r\n 'pub_date': 
'Дата публикации',\r\n 'group': 'Група',\r\n 'author': 'Автор',\r\n }\r\n for field, expected_value in field_help_text.items():\r\n with self.subTest(field=field):\r\n self.assertEqual(\r\n post._meta.get_field(field).verbose_name, expected_value)\r\n","repo_name":"NECROshizo/Yatube","sub_path":"yatube/posts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45617859585","text":"import torch\nfrom torch import nn\nfrom utils import load_data_wiki\nfrom Bert import BERTModel\nfrom train_eval import train_bert, get_bert_encoding\n\n\nif __name__ == '__main__':\n batch_size, max_len = 512, 64\n train_iter, vocab = load_data_wiki(batch_size, max_len)\n net = BERTModel(len(vocab), num_hiddens=128, norm_shape=[128], ffn_num_input=128, ffn_num_hiddens=256, num_heads=2,\n num_layers=2, dropout=0.2, key_size=128, query_size=128, value_size=128, hid_in_features=128,\n mlm_in_features=128, nsp_in_features=128)\n devices = [torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count())]\n devices = devices if devices else [torch.device('cpu')]\n loss = nn.CrossEntropyLoss()\n train_bert(train_iter, net, loss, len(vocab), devices, 50)\n tokens_a = ['a', 'crane', 'is', 'flying']\n encoded_text = get_bert_encoding(net, vocab, devices, tokens_a)\n # 词元:'','a','crane','is','flying',''\n encoded_text_cls = encoded_text[:, 0, :]\n encoded_text_crane = encoded_text[:, 2, :]\n print(encoded_text.shape, encoded_text_cls.shape, encoded_text_crane[0][:3])\n # BERT表⽰是上下⽂敏感的\n tokens_a, tokens_b = ['a', 'crane', 'driver', 'came'], ['he', 'just', 'left']\n encoded_pair = get_bert_encoding(net, vocab, devices, tokens_a, tokens_b)\n # 词元:'','a','crane','driver','came','','he','just', 'left',''\n encoded_pair_cls = encoded_pair[:, 0, :]\n encoded_pair_crane = encoded_pair[:, 2, :]\n print(encoded_pair.shape, encoded_pair_cls.shape, encoded_pair_crane[0][:3])\n","repo_name":"kaddly/NaturalLanguageProcessing","sub_path":"PreTrain/Bert/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1508442192","text":"'''import sys\nn, k = map(int, sys.stdin.readline().split())\nwv = [list(map(int, sys.stdin.readline().split())) for _ in range(n)]\n\ndp = [0 for _ in range(2*(k+1))]\n\nfor temp_w, temp_v in wv:\n dp[temp_w] = temp_v\nans = -100\n\nfor i in range(0, k+1):\n for j in range(0, i):\n if dp[i] == 0 or dp[j] == 0:\n continue\n if dp[i+j] == 0:\n ans = dp[i] + dp[j]\n if ans > dp[i+j]:\n dp[i+j] = ans\n\nprint(dp[k])\n#print(dp)\n'''\n\nimport sys\nread = sys.stdin.readline\n\nN, K = map(int, read().split())\ncache = [0] * (K+1) # 인덱스 : 무게, 값 : 가치\n\nfor _ in range(N):\n w, v = map(int, read().split())\n for j in range(K, w-1, -1):\n cache[j] = max(cache[j], cache[j-w] + v)\n # 기존의 무게j일때 가치와 물건(무게w이고 가치v인)을 넣었을 때의 가치 중 최댓값\n \nprint(cache[-1])\n\n#\n# 거꾸로 접근.\n\n#================================================#\n\nimport sys\nread = sys.stdin.readline\n\nN, K = map(int, read().split())\ncache = {0: 0}\n\nfor _ in range(N):\n curr_w, curr_v = map(int, read().split())\n temp = {}\n for w, v in cache.items():\n if curr_w + w <= K and curr_v + v > cache.get(curr_w + w, 0):\n temp[curr_w + w] = curr_v + v\n 
cache.update(temp)\nprint(max(cache.values()))","repo_name":"YoungTae0406/bj-pg_Algorithm","sub_path":"12865_normalBag.py","file_name":"12865_normalBag.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13109427568","text":"from gpiozero import LEDBoard\nfrom gpiozero.tools import random_values\nfrom signal import pause\nimport random, time\n\ntree = LEDBoard(*range(2,28),pwm=True)\n\neffectsSpeed = 0.025 #This is the speed at which the LEDs update. Bigger number = slower updates.\n\n###################################\n# #\n# The LEDs are setup with PWM #\n# and can accept values between #\n# 0.0 and 1.0. #\n# #\n# The star is on GPIO 2, or LED #\n# tree[0]. #\n# #\n###################################\n\nled_dict = {} #Setup the main dictionary\nled_list = range(len(tree)) #make a list of sequential numbers, 1 number per LED\n\nfor ident in led_list: #Cycle through each LED and setup default values\n ledMotion = ident%2 #This will give us alternating 1's and 0's to make effects look better\n ledSpeed = random.randrange(0,5) #Pick a random speed range\n\n randomValueList = [0,0,0,0] #Define the list to hold all the seed brightness values\n randomValueList[0] = (random.randrange(1,25))/100.0 #0.01 through 0.25\n randomValueList[1] = (random.randrange(26,50))/100.0 #0.26 through 0.50\n randomValueList[2] = (random.randrange(51,75))/100.0 #0.51 through 0.75\n randomValueList[3] = (random.randrange(76,99))/100.0 #0.76 through 0.99\n value_group = random.randrange(0,3) #Pick one of the brightness vaules to assign LED\n\n #Setup the LED dictionary\n led_dict[str(ident)] = {\"value\":randomValueList[value_group], \"motion\":ledMotion, \"speed\":ledSpeed}\n\n\n\n###############################################################################\n# #\n# brightnessModify(value, motion, speed): #\n# value = LED brightness value #\n# motion = increasing or decreasing brightness (1=increasing 0=decreasing) #\n# speed = speed at which brightness changes; 6 speed settings (0 through 5) #\n# #\n# returns: #\n# function returns new value, motion, and speed values #\n# value should always be new except around boundary values #\n# motion and speed may or may not change #\n# #\n###############################################################################\ndef brightnessModify(value, motion, speed):\n if speed == 0: #slowest speed\n deltaChange = 0.001 #step size\n if speed == 1:\n deltaChange = 0.005\n if speed == 2:\n deltaChange = 0.008\n if speed == 3:\n deltaChange = 0.01\n if speed == 4:\n deltaChange = 0.0175\n if speed == 5: #fastest speed\n deltaChange = 0.0225\n\n if motion == 1: #increasing in value\n value = value + deltaChange\n if motion == 0: #decreasing in value\n value = value - deltaChange\n\n if (motion == 1) and (value > 0.5): #chance for motion and speed to change for effects\n motion = random.randrange(0,1)\n speed = random.randrange(0,5)\n\n if (motion == 0) and (value < 0.5): #chance for motion and speed to change for effects\n motion = random.randrange(0,1)\n speed = random.randrange(0,5)\n\n\n if ((value < 0.000) or (value > 1.0)): #if we've hit a boundary for the PWM\n\n if value < 0.0: #If we are at the lower boundary\n value = 0.0 #Force value to be 0\n motion = 1 #Change motion to increase on next pass through\n\n if value > 1.0: #If we are at the upper boundary\n value = 1.0 #Force value to be 1\n motion = 0 #Change motion to decrease on next pass through\n\n return [value, 
motion, speed]\n\n\n\n#############################################################\n# #\n# Main While Loop #\n# Gets the new settings out of the dictionary #\n# Sets the brightness of the LED via \"value\" #\n# Calls to brightnessModify() to get new parameters #\n# Writes new parameters into dictionary and gets next LED #\n# #\n#############################################################\n\nwhile (1):\n\n for leds in led_list:\n ledString = str(leds) #Needed to ref the dict\n\n value = led_dict[ledString]['value'] #Pull the value info\n motion = led_dict[ledString]['motion'] #Pull the motion info\n speed = led_dict[ledString]['speed'] #Pull the speed info\n\n tree[leds].value = value #Set the LED brightness\n\n newSettings = brightnessModify(value, motion, speed) #Get the new settings\n# print str(newSettings[0]) + \" --- \" + str(newSettings[1]) + \" --- \" + str(newSettings[2])\n led_dict[ledString]['value'] = newSettings[0] #Update the value info\n led_dict[ledString]['motion'] = newSettings[1] #Update the motion info\n led_dict[ledString]['speed'] = newSettings[2] #Update the speed info\n\n time.sleep(effectsSpeed) #Time between LED updates\n\n","repo_name":"Echoskope/3D-Xmas-Tree-for-Raspberry-Pi","sub_path":"raspPi3DxmasTree_finished.py","file_name":"raspPi3DxmasTree_finished.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44096427868","text":"file1 = open('C:/Users/u6078131/Code/gitHub/CodingProjects/python-projects/columns_txn_ext.txt', 'r') \nLines = file1.readlines() \nfile2 = open('myfile.txt', 'w') \ncount = 0\n# Strips the newline character \nfor line in Lines: \n #print(line.strip())\n words = line.split(\" as \");\n file2.write(\"aMap.put({},\\\"{}\\\");\\n\".format(words[1].split(',')[0].strip(), words[0].strip()))\n # print(\"put(\\\"{}\\\", {});\".format(words[0].strip(), words[1].split(',')[0].strip())) \nfile2.close()\nfile1.close()","repo_name":"anuplalgupta/CodingProjects","sub_path":"python-projects/readFileandOutputStaticMapForJava.py","file_name":"readFileandOutputStaticMapForJava.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31034412123","text":"from flask import Flask,render_template,redirect,request,url_for,flash\nfrom app import app, db, login_manager\nfrom app.models.PessoaModel import Pessoa\nfrom app.models.UsuarioModel import UsuarioModel\nfrom app.models.ProdutoModel import ProdutoModel\nfrom app.controllers.login.login import requires_roles\nfrom flask_login import LoginManager, UserMixin, login_required,login_user, logout_user\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\n@app.route('/listagem')\n# @requires_roles('Cliente')\n# @login_required\ndef listagem():\n\tpessoas = Pessoa.query.all()\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem='id')\n\n@app.route('/selecao/')\n# @login_required\ndef selecao(id=0):\n\tpessoas = Pessoa.query.filter_by(id=id).all()\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem='id')\n\n@app.route('/ordenacao//')\n# @login_required\ndef ordenacao(campo='id', ordem_anterior=''):\n\tif campo =='id':\n\t\tif ordem_anterior == campo:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.id.desc()).all()\n\t\telse:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.id).all()\n\telif campo == 'nome':\n\t\tif ordem_anterior == 
campo:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.nome.desc()).all()\n\t\telse:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.nome).all()\n\telif campo == 'idade':\n\t\tif ordem_anterior == campo:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.idade.desc()).all()\n\t\telse:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.idade).all()\n\telif campo == 'sexo':\n\t\tif ordem_anterior == campo:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.sexo.desc()).all()\n\t\telse:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.sexo).all()\n\telif campo == 'salario':\n\t\tif ordem_anterior == campo:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.salario.desc()).all()\n\t\telse:\n\t\t\tpessoas = Pessoa.query.order_by(Pessoa.salario).all()\n\telse:\n\t\tpessoas = Pessoa.query.order_by(Pessoa.id).all()\n\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem=campo)\n\n@app.route('/consulta', methods=['POST'])\n# @login_required\ndef consulta():\n\tconsulta = '%'+request.form.get('consulta')+'%'\n\tcampo = request.form.get('campo')\n\t\n\tif campo == 'nome':\n\t\tpessoas = Pessoa.query.filter(Pessoa.nome.like(consulta)).all()\n\telif campo == 'idade':\n\t\tpessoas = Pessoa.query.filter(Pessoa.idade.like(consulta)).all()\n\telif campo == 'sexo':\n\t\tpessoas = Pessoa.query.filter(Pessoa.sexo.like(consulta)).all()\n\telif campo == 'salario':\n\t\tpessoas = Pessoa.query.filter(Pessoa.salario.like(consulta)).all()\n\telse:\n\t\tpessoas = Pessoa.query.all()\n\t\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem='id')\n\n@app.route('/insercao')\n# @login_required\ndef insercao():\n\treturn render_template('development/insercao.html')\n\n@app.route('/salvar_insercao', methods=['POST'])\ndef salvar_insercao():\n\tnome = request.form.get('nome')\n\tidade = int(request.form.get('idade'))\n\tsexo = request.form.get('sexo')\n\tsalario = float(request.form.get('salario'))\n\n\tpessoa = Pessoa(nome,idade,sexo,salario)\n\n\tdb.session.add(pessoa)\n\tdb.session.commit()\n\n\tpessoas = Pessoa.query.all()\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem='id')\n\n@app.route('/edicao/')\n@login_required\ndef edicao(id=0):\n\tpessoa = Pessoa.query.filter_by(id =id).first()\n\treturn render_template('development/edicao.html', pessoa=pessoa)\n\n@app.route('/salvar_edicao',methods=['POST'])\n# @login_required\ndef salvar_edicao():\n\tId = int(request.form.get('id'))\n\tNome = request.form.get('nome')\n\tIdade = int(request.form.get('idade'))\n\tSexo = request.form.get('sexo')\n\tSalario = float(request.form.get('salario'))\n\n\tpessoa = Pessoa.query.filter_by(id=Id).first()\n\t\n\tpessoa.nome = Nome\n\tpessoa.idade = Idade\n\tpessoa.sexo = Sexo\n\tpessoa.salario = Salario\n\n\tdb.session.commit()\n\n\tpessoas = Pessoa.query.all()\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem='id')\n\n@app.route('/delecao/')\n# @login_required\ndef delecao(id=0):\n\tpessoa = Pessoa.query.filter_by(id=id).first()\n\treturn render_template('delecao.html', pessoa=pessoa)\n\n@app.route('/salvar_delecao', methods=['POST'])\n# @login_required\ndef salvar_delecao():\n\tId = int(request.form.get('id'))\n\n\tpessoa = Pessoa.query.filter_by(id=Id).first()\n\n\tdb.session.delete(pessoa)\n\tdb.session.commit()\n\n\tpessoas = Pessoa.query.all()\n\treturn render_template('development/listagem.html', pessoas=pessoas, ordem='id')\n\t\n\n@app.route('/graficos')\n# @login_required\ndef graficos():\n\tpessoasM = Pessoa.query.filter_by(sexo='M').all()\n\tpessoasF 
= Pessoa.query.filter_by(sexo='F').all()\n\t\n\tsalarioM = 0\n\tfor m in pessoasM:\n\t\tsalarioM += m.salario\n\tif len(pessoasM) > 0:\n\t\tsalarioM = salarioM / len(pessoasM)\n\n\tsalarioF = 0\n\tfor f in pessoasF:\n\t\tsalarioF += f.salario\n\tif len(pessoasF) > 0:\n\t\tsalarioF = salarioF / len(pessoasF)\n\t\t\n\tIdadeM = 0\n\tfor m in pessoasM:\n\t\t\tIdadeM += m.idade\n\tif len(pessoasM) > 0:\n\t\t\tIdadeM = IdadeM / len(pessoasM)\n\n\tIdadeF = 0\n\tfor f in pessoasF:\n\t\tIdadeF += f.idade\n\tif len(pessoasF) > 0:\n\t\tIdadeF = IdadeF / len(pessoasF)\n\t\t\t\n\treturn render_template('development/graficos.html',\n\t\t\t\t\t\t\tsalarioM=salarioM, salarioF=salarioF, idadeM=IdadeM, idadeF=IdadeF)\n","repo_name":"rolf-gutz/OpeTuring","sub_path":"app/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25111309100","text":"# from bikingapp.views import IssueCreateView\nfrom captcha.fields import ReCaptchaField\nfrom captcha.widgets import ReCaptchaV2Checkbox\nfrom django import forms\nfrom django.forms import (\n TextInput,\n DateTimeInput,\n RadioSelect,\n Select,\n Textarea,\n NumberInput,\n)\nfrom django.contrib.auth.forms import (\n UserCreationForm,\n AuthenticationForm,\n SetPasswordForm,\n PasswordResetForm,\n)\nfrom django.contrib.auth import get_user_model\nfrom .models import Event, Workout, Comment, DiscForumComment, Issue\n\n# from .models import Account\nfrom .widgets import DatePickerInput, TimePickerInput\n\n\nclass UserRegistrationForm(UserCreationForm):\n email = forms.EmailField(help_text=\"A valid email address, please.\", required=True)\n\n class Meta:\n model = get_user_model()\n fields = [\n \"first_name\",\n \"last_name\",\n \"username\",\n \"email\",\n \"password1\",\n \"password2\",\n ]\n\n def save(self, commit=True):\n user = super(UserRegistrationForm, self).save(commit=False)\n user.email = self.cleaned_data[\"email\"]\n if commit:\n user.save()\n\n return user\n\n\nclass UserLoginForm(AuthenticationForm):\n def __init__(self, *args, **kwargs):\n super(UserLoginForm, self).__init__(*args, **kwargs)\n\n username = forms.CharField(\n widget=forms.TextInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Username\"}\n ),\n label=\"Username\",\n )\n\n password = forms.CharField(\n widget=forms.PasswordInput(\n attrs={\"class\": \"form-control\", \"placeholder\": \"Password\"}\n )\n )\n\n captcha = ReCaptchaField(widget=ReCaptchaV2Checkbox())\n\n\nclass UserUpdateForm(forms.ModelForm):\n # email = forms.EmailField()\n\n class Meta:\n model = get_user_model()\n fields = [\"first_name\", \"last_name\", \"image\", \"description\"]\n\n\nclass SetPasswordForm(SetPasswordForm):\n class Meta:\n model = get_user_model()\n fields = [\"new_password1\", \"new_password2\"]\n\n\nclass PasswordResetForm(PasswordResetForm):\n def __init__(self, *args, **kwargs):\n super(PasswordResetForm, self).__init__(*args, **kwargs)\n\n captcha = ReCaptchaField(widget=ReCaptchaV2Checkbox())\n\n\nclass EventForm(forms.ModelForm):\n friends_invited = forms.CharField(label=\"extra_field\", required=False)\n\n class Meta:\n model = Event\n fields = (\n \"title\",\n \"location\",\n \"borough\",\n \"state\",\n \"zipcode\",\n \"date\",\n \"time\",\n \"date_created\",\n \"event_type\",\n \"description\",\n \"created_by\",\n )\n widgets = {\n \"title\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 92%; 
margin-bottom: 10px;display: inline-block;\", # noqa: E501\n \"placeholder\": \"Title\",\n }\n ),\n \"location\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 88%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"borough\": Select(\n attrs={\n \"class\": \"btn dropdown-toggle\",\n \"style\": \"max-width: 35%; display: inline-block; border: 1px solid lightgray;\", # noqa: E501\n }\n ),\n \"state\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"readonly\": \"readonly\",\n \"style\": \"max-width: 19%;margin-bottom: 10px; display: inline-block;\", # noqa: E501\n }\n ),\n \"zipcode\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 20%; margin-bottom: 10px; display: inline-block;\", # noqa: E501\n \"placeholder\": \"Zip\",\n }\n ),\n \"date\": DatePickerInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 30%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"time\": TimePickerInput(\n attrs={\n \"step\": \"any\",\n \"class\": \"form-control\",\n \"style\": \"max-width: 30%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"date_created\": DateTimeInput(\n attrs={\n \"readonly\": \"readonly\",\n \"class\": \"form-control\",\n \"style\": \"max-width: 58%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"event_type\": RadioSelect(\n attrs={\n \"class\": \"custom-radio-list\",\n \"style\": \"max-width: 300px; margin-bottom: 10px;\",\n }\n ),\n \"description\": Textarea(\n attrs={\n \"class\": \"form-control\",\n \"rows\": 4,\n \"style\": \"max-width: 100%; margin-bottom: 10px;\",\n }\n ),\n \"created_by\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"readonly\": \"readonly\",\n \"style\": \"max-width: 65%; display: inline-block;\",\n }\n ),\n \"friends_invited\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 92%; margin-bottom: 10px;display: inline-block;\",\n }\n ),\n }\n\n\nclass WorkoutForm(forms.ModelForm):\n class Meta:\n model = Workout\n fields = (\n \"title\",\n \"miles\",\n \"date\",\n \"time_start\",\n \"time_end\",\n \"date_created\",\n \"description\",\n \"created_by\",\n )\n widgets = {\n \"title\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 92%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n \"placeholder\": \"Title\",\n }\n ),\n \"miles\": NumberInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 92%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"date\": DatePickerInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 30%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"time_start\": TimePickerInput(\n attrs={\n \"step\": \"any\",\n \"class\": \"form-control\",\n \"style\": \"max-width: 30%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"time_end\": TimePickerInput(\n attrs={\n \"step\": \"any\",\n \"class\": \"form-control\",\n \"style\": \"max-width: 30%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"date_created\": DateTimeInput(\n attrs={\n \"readonly\": \"readonly\",\n \"class\": \"form-control\",\n \"style\": \"max-width: 58%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n \"description\": Textarea(\n attrs={\n \"class\": \"form-control\",\n \"rows\": 4,\n \"style\": \"max-width: 100%; margin-bottom: 10px;\",\n }\n ),\n \"created_by\": 
TextInput(\n attrs={\n \"class\": \"form-control\",\n \"readonly\": \"readonly\",\n \"style\": \"max-width: 65%; display: inline-block;\",\n }\n ),\n }\n\n\nclass FriendMgmtForm(forms.Form):\n \"\"\"\n Manages friends connections\n \"\"\"\n\n friend_username = forms.CharField(\n max_length=100,\n required=False,\n label=\"Add \",\n widget=forms.TextInput(\n attrs={\n \"placeholder\": \"Username\",\n \"style\": \"width:50%;border: 1px solid gray; border-radius:5px;padding-bottom:4px;padding-left:6px;margin-left:10px\", # noqa: E501\n }\n ),\n )\n\n\n\"\"\"\nclass MyCustomSignupForm(SignupForm):\n pronouns = forms.ChoiceField(\n choices=(\n (\"He/Him\", \"He/Him\"),\n (\"She/Her\", \"She/Her\"),\n (\"They/Them\", \"They/Them\"),\n (\"\", \"Select your pronouns\"),\n )\n )\n description = forms.CharField(max_length=500, required=False)\n\n def save(self, request):\n user = super(MyCustomSignupForm, self).save(request)\n account1 = Account.objects.create(\n user=user,\n pronouns=self.cleaned_data[\"pronouns\"],\n description=self.cleaned_data[\"description\"],\n )\n print(account1)\n account1.save()\n user.save()\n return user\n\"\"\"\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = (\"body\",)\n widgets = {\n \"body\": Textarea(\n attrs={\n \"class\": \"form-control\",\n \"rows\": 2,\n \"style\": \"max-width: 100%; margin-bottom: 10px;\",\n }\n ),\n }\n\n\nclass DiscForumCommentForm(forms.ModelForm):\n class Meta:\n model = DiscForumComment\n fields = (\"body\",)\n widgets = {\n \"body\": Textarea(\n attrs={\n \"class\": \"form-control\",\n \"rows\": 2,\n \"style\": \"max-width: 100%; margin-bottom: 10px;\",\n }\n ),\n }\n\n\nclass IssueForm(forms.ModelForm):\n class Meta:\n model = Issue\n fields = (\"title\", \"content\", \"author\", \"location\", \"date\")\n widgets = {\n \"title\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 92%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n \"placeholder\": \"Title\",\n }\n ),\n \"content\": Textarea(\n attrs={\n \"class\": \"form-control\",\n \"rows\": 4,\n \"style\": \"max-width: 100%; margin-bottom: 10px;\",\n }\n ),\n \"author\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"readonly\": \"readonly\",\n \"style\": \"max-width: 65%; display: inline-block;\",\n }\n ),\n \"location\": TextInput(\n attrs={\n \"class\": \"form-control\",\n \"style\": \"max-width: 92%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n \"placeholder\": \"Location\",\n }\n ),\n \"date\": DatePickerInput(\n attrs={\n \"readonly\": \"readonly\",\n \"class\": \"form-control\",\n \"style\": \"max-width: 30%; margin-bottom: 10px;display: inline-block;\", # noqa: E501\n }\n ),\n }\n","repo_name":"gcivil-nyu-org/INET-Team-3-F2022","sub_path":"bikingapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":11749,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"357436754","text":"import sys\n\nlanguage = sys.argv[1]\nreal_type = sys.argv[2] if len(sys.argv) > 2 else \"REAL_REAL\"\n\ndef readTSV(x):\n header = next(x).strip().split(\"\\t\")\n header = dict(zip(header, range(len(header))))\n data = [y.strip().split(\"\\t\") for y in x]\n if len(data) < 3:\n return (header, [])\n for column in range(len(header)):\n try:\n vals= [int(y[column]) for y in data]\n except ValueError:\n try:\n vals= [float(y[column]) for y in data]\n except ValueError:\n vals= [y[column] for y in data]\n for i in 
range(len(data)):\n            data[i][column] = vals[i]\n    return (header, data)\ntry:\n    with open(\"../results/raw/word-level/\"+language+\"_after_tuning_onlyWordForms_boundedVocab.tsv\", \"r\") as inFile:\n        data = readTSV(inFile)\nexcept IOError:\n    print(\"\\t\".join(map(str, [language, 0,0 ])))\n    quit()\n#print(len(data))\n\n\n\n#data = data %>% group_by(ModelID) %>% mutate(CumulativeMemory = cumsum(Distance*ConditionalMI), CumulativeMI = cumsum(ConditionalMI))\n\ndef g(frame, name, i):\n    return frame[1][i][frame[0][name]]\n\nmatrix = [[g(data, \"ModelID\", i) ] for i in range(len(data[1]))]\n\nmatrixByType = {}\nmisByType = {}\n#unigramCEByType = {}\nfor i in range(len(data[1])):\n    typ = g(data, \"Type\", i)\n#    print(i,typ, len(data[1]))\n    if typ not in matrixByType:\n        matrixByType[typ] = []\n    matrixByType[typ].append(matrix[i])\n\nprint(\"\\t\".join(map(str, [language, len(matrixByType.get(real_type,[])), len(matrixByType.get(\"RANDOM_BY_TYPE\",[])) ])))\n\n","repo_name":"m-hahn/memory-surprisal","sub_path":"code/analysis/models_stats/tradeoffStats_OnlyWordForms_BoundedVocab.py","file_name":"tradeoffStats_OnlyWordForms_BoundedVocab.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28178377082","text":"\"\"\"\r\nA rather naive approach: trade space for time.\r\nUse a dict to store the count of each element.\r\n\"\"\"\r\nclass Solution:\r\n    def MoreThanHalfNum_Solution(self, numbers):\r\n        # write code here\r\n        counts = {}\r\n        for no in numbers:\r\n            if no not in counts:\r\n                counts[no] = 1\r\n            else:\r\n                counts[no] = counts[no] + 1\r\n            if counts[no] > len(numbers)/2:\r\n                return no\r\n        return 0\r\n","repo_name":"jasonusaco/Leetcode-Practice","sub_path":"剑指offer/数组中出现次数超过一半的数.py","file_name":"数组中出现次数超过一半的数.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74659370932","text":"import torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch import nn\nfrom torch import distributions\nfrom torch.distributions import (\n    Normal,\n    MultivariateNormal,\n    Uniform,\n    TransformedDistribution,\n    SigmoidTransform,\n)\nfrom torch.nn.parameter import Parameter\nfrom torch.optim.optimizer import Optimizer, required\n\n# import pandas as pd\n\n\nfrom nf.flows import (\n    AffineConstantFlow,\n    ActNorm,\n    Invertible1x1Conv,\n    NormalizingFlow,\n    NormalizingFlowModel,\n)\nfrom nf.spline_flows import NSF_CL\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import time\n\nfrom torch.utils.data import DataLoader, TensorDataset\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\ntorch.cuda.set_device(device)\n\nif device.type == \"cuda\":\n    print(torch.cuda.get_device_name(0))\nelif device.type == \"cpu\":\n    print(\"Using the cpu...\")\n\n# choose data here\nspectra = np.load(\"./data/X_train_payne_region_cond_temp_logg.npy\")\nspectra = spectra.T\nprint(spectra.shape)\n\n# use even number of dimensions\nspectra = spectra[:, 1:]\nspectra = torch.Tensor(spectra)\nspectra = spectra - 0.5\ndim = spectra.shape[-1]\nprint('spectra dim is', dim)\nprint(spectra.shape)\n\n# conditional labels here\n\nlabels = np.load(\"./data/y_train_payne_region_cond_temp_logg.npy\")\nlabels.shape\n\n# conditioning on teff, logg\ny = np.array([labels[:, 0], labels[:, 1]]).T\ny = torch.tensor(y, dtype=torch.float32).reshape(-1, 2)\nprint(y.shape)\n\ncond_dim = y.shape[-1]\nprint(\"y dim is:\", 
cond_dim)\n\n# choose prior here\nbase_mu, base_cov = torch.zeros(dim).to(device), torch.eye(dim).to(device)\nprior = MultivariateNormal(base_mu, base_cov)\n\n# configure the normalising flow\nnfs_flow = NSF_CL\nflows = [\n    nfs_flow(\n        dim=dim, device=device, context_features=cond_dim, K=8, B=3, hidden_dim=256\n    )\n    for _ in range(30)\n]  # things to change> maybe more is needed??!\nconvs = [Invertible1x1Conv(dim=dim, device=device) for _ in flows]\nnorms = [ActNorm(dim=dim, device=device) for _ in flows]\nflows = list(itertools.chain(*zip(norms, convs, flows)))\n\n# initialise the model\nmodel = NormalizingFlowModel(prior, flows, device)\n\nif torch.cuda.device_count() > 1:\n    print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n    model = nn.DataParallel(model)\n\nmodel = model.to(device)\noptimizer = optim.Adam(model.parameters(), lr=2e-6, weight_decay=1e-5)  # todo tune WD\n# print(\"number of params: \", sum(p.numel() for p in model.parameters()))\n\n# train_loader\ndataset = TensorDataset(spectra, y)\n\n# Create a data loader from the dataset\n# Type of sampling and batch size are specified at this step\nloader = DataLoader(\n    dataset, batch_size=100, shuffle=True, pin_memory=True\n)  # this will give x, y per batch\n\nt0 = time()\nmodel.train()\nprint(\"Started training\")\nn_epochs = 500\nloss_history = []\n\n\nmodel.train()\nprint(\"Started training\")\nfor k in range(n_epochs):\n    for batch_idx, data_batch in enumerate(loader):\n        x, y = data_batch\n        x = x.to(device)\n        y = y.to(device)\n        zs, prior_logprob, log_det = model(\n            x, context=y\n        )  # definitely need to make this work better!?\n        del x\n        del y\n        logprob = prior_logprob + log_det\n        loss = -torch.sum(logprob)  # NL\n\n        model.zero_grad()\n        loss.backward()\n        optimizer.step()\n        loss_history.append(float(loss))\n    if k % 100 == 0:\n        print(\"Loss at step k =\", str(k) + \":\", loss.item())\n\nt1 = time()\nprint(f\"Elapsed time: {t1-t0:.1f} s\")\n\n# Specify a path to save to\nPATH = \"model_cond_exp1.pt\"\n# Save\ntorch.save(model.module.state_dict(), PATH)\nnp.savetxt(\"loss_hist_cond_exp1.npy\", loss_history)\n\nmodel.eval()\ncont = np.ones((100, 2))\ncont[:, 0] = 4.5\ncont[:, 1] = 2.1\n\ncont = torch.tensor(cont, dtype=torch.float32).reshape(-1, 2)\n\nzs = model.sample(100, context=cont)\nz = zs[-1]\nz = z.to('cpu')\nz = z.detach().numpy()\n\nfig = plt.figure(figsize=(14, 4))\n\nfor i in range(10):\n    plt.plot(z[i])\n\nplt.savefig('model_cond_exp1.png')\n","repo_name":"errai34/DeepSpectra","sub_path":"payne_cond_exp1.py","file_name":"payne_cond_exp1.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"37277162288","text":"import tkinter as tk\nimport requests\n\nclass Application(tk.Frame):\n    def __init__(self, master=None):\n        super().__init__(master)\n        self.pack()\n        self.create_widgets()\n\n    def create_widgets(self):\n        self.label = tk.Label(text=\"Check site of\")\n        self.label.pack()\n        self.entrythingy = tk.Entry()\n        self.entrythingy.pack()\n\n        self.contents = tk.StringVar()\n        self.entrythingy['textvariable'] = self.contents\n        self.entrythingy.bind('<Key-Return>', self.check_site)\n\n        self.hi_there = tk.Button(self)\n        self.hi_there['text'] = 'Check site'\n        self.hi_there['command'] = self.check_site\n        self.hi_there.pack()\n\n        self.quit = tk.Button(self, text='QUIT', command=root.destroy)\n        self.quit.pack()\n\n    def check_site(self, event=None):\n        url = self.contents.get().strip()\n        if not url.startswith('http'):\n            url = 
'http://{}'.format(url)\n \n resp = requests.get(url)\n print(\"{} response: {}\".format(url, resp.status_code))\n\n var = tk.StringVar()\n if (resp.status_code==200):\n var.set(\"OK\")\n else:\n var.set(\"not OK\")\n\n self.label = tk.Label(textvariable=var)\n self.label.pack()\n\n\nroot = tk.Tk()\napp = Application(master=root)\napp.master.title('My checker app')\napp.master.minsize(200, 150)\napp.mainloop()","repo_name":"sytungan/repoT","sub_path":"testConnect.py","file_name":"testConnect.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22144701876","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport time\n\nst.title(\"Etc\")\n\nst.markdown(\"---\")\n\n\nwith st.echo():\n my_bar = st.progress(0)\n \n for percent_complete in range(100):\n time.sleep(0.05)\n my_bar.progress(percent_complete + 1)\n\n with st.spinner(\"Wait!\"):\n time.sleep(5)\n st.write(\"done\")\nst.markdown(\"---\")\n\nwith st.echo():\n st.error(\"st error\")\n st.warning(\"st warning\")\n st.info(\"st info\")\n st.success(\"st success\")\n e = RuntimeError(\"example of error\")\n st.exception(e)\nst.markdown(\"---\")\n\n\n","repo_name":"jja8989/streamlit_tutorial","sub_path":"pages/6_🔖Etc.py","file_name":"6_🔖Etc.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33110596986","text":"# -*- coding: utf-8 -*-\nfrom sys import argv\n\n\nclass Costs:\n insert = 1\n delete = 1\n change = 2 # like insertion + deletion\n swap = 2 # same\n\n\ndef costs_matrix(a, b, costs):\n \"\"\"\n a = 'xyz'\n b = 'ayzb'\n\n m = / a y z b\n 0 1 2 3 4\n x 1 2 3 4 5\n y 2 3 2 3 4\n z 3 4 3 2 3\n\n m[row][col] -> m[2][4] == 6\n \"\"\"\n height = len(a) + 1\n width = len(b) + 1\n\n m = [[0] * width for _ in range(height)]\n\n for row in range(height):\n m[row][0] = row * costs.delete\n\n for col in range(width):\n m[0][col] = col * costs.insert\n\n for row in range(1, height):\n for col in range(1, width):\n north = m[row - 1][col]\n west = m[row][col - 1]\n north_west = m[row - 1][col - 1]\n\n if a[row - 1] == b[col - 1]:\n m[row][col] = north_west\n else:\n m[row][col] = min(\n north + costs.delete, west + costs.insert, north_west + costs.change\n )\n\n if row > 1 and col > 1 and a[row - 2] == b[col - 1] and a[row - 1] == b[col - 2]:\n\n before_two = m[row - 2][col - 2]\n\n m[row][col] = min(m[row][col], before_two + costs.swap)\n\n return m\n\n\ndef edit_dist(a, b, costs=None):\n costs = costs or Costs()\n m = costs_matrix(a, b, costs)\n return m[-1][-1]\n\n\ndef _print_costs_matrix(a, b):\n # XXX mainly for debug purposes; consider removing\n m = costs_matrix(a, b, Costs())\n\n print(' '.join('/ ' + b))\n\n for line, c in zip(m, ' ' + a):\n print(c, end=' ')\n\n for item in line:\n print(item, end=' ')\n\n print()\n\n\ndef print_costs_matrix():\n a, b = 'xyz', 'ayzb'\n print_costs_matrix(a, b)\n\n\ndef main():\n a, b = argv[1:]\n print(edit_dist(a, b))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ThreatConnect-Inc/threatconnect-playbooks","sub_path":"apps/TCPB_-_Expressions/src/spamspy/edit_dist.py","file_name":"edit_dist.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"21"} +{"seq_id":"18216553962","text":"class Solution:\n def maximumDifference(self, nums: List[int]) -> int:\n ans = -1\n mini = 
nums[0]\n\n    for i in range(len(nums)):\n      if nums[i] > mini:\n        ans = max(ans, nums[i] - mini)\n      mini = min(mini, nums[i])\n\n    return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/2016. Maximum Difference Between Increasing Elements/2016.py","file_name":"2016.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"de","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"}
{"seq_id":"42630649733","text":"# coding: utf-8\n\n\ns = \"hanxiaocu 帅帅的\"\n\ns.isalnum()  # all characters are letters or digits\ns.isalpha()  # all characters are letters\ns.isdigit()  # all characters are digits\ns.islower()  # all characters are lowercase\ns.isupper()  # all characters are uppercase\ns.istitle()  # every word starts with an uppercase letter, like a title\ns.isspace()  # all characters are whitespace, \\t, \\n\n\n\ns.upper()  # convert all lowercase letters to uppercase\ns.lower()  # convert all uppercase letters to lowercase\ns.capitalize()  # uppercase the first letter, lowercase the rest\ns.title()  # uppercase the first letter of every word, lowercase the rest\n\n\n# string.atof(s) converts a string to a float","repo_name":"SmallBlackBeans/pythonPractice","sub_path":"hello/Base/字符串.py","file_name":"字符串.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"1015190168","text":"from selenium import webdriver\nimport requests\nimport bs4\nimport os\nimport time\n\nbrowser = webdriver.Firefox(executable_path=r'C:\\Users\\mathew\\Documents\\geko\\geckodriver.exe')\nbrowser.get(\"https://soundcloud.com/\")\n\ntime.sleep(2) \n\n# method creation\n\ndef automatic_heart(genre):\n\tglobal like_speed, like_count\n\turl = ('https://soundcloud.com/search/sounds?q=' + genre + '&filter.created_at=last_hour')\n\tbrowser.get(url)\n\tprint(\"searching genre \", genre_inp)\n\tprint('liking posts')\n\ttry:\n\t\tfor like_index in range(2, like_count):\n\t\t\tlike_url = ('li.searchList__item:nth-child(' + str(like_index) + ') > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > div:nth-child(2) > div:nth-child(4) > div:nth-child(1) > div:nth-child(1) > div:nth-child(1) > button:nth-child(1)')\n\t\t\tlike_element = browser.find_element_by_css_selector(like_url)\n\t\t\ttime.sleep(0.5)\n\t\t\tlike_element.click()\n\t\t\ttime.sleep(0.5)\n\t\t\tbrowser.execute_script('arguments[0].scrollIntoView(true);', like_element) \n\t\t\ttime.sleep(like_speed)\n\texcept:\n\t\tprint(\"don't manipulate the window, or there were not enough posts\")\n\n\n# variables\n\nlike_count = 50\nlike_speed = 1\nloggin_indicator = False\ngenre_inp = \"house\"\n\n\n# element assignment\n\nsignin_element = browser.find_element_by_css_selector('.frontHero__loginButton')\n\n\n# action\n\nsignin_element.click()\n\nprint('please log in using gmail or facebook')\n\ntime.sleep(2)\n\nwhile loggin_indicator == False:\n\tif browser.current_url == \"https://soundcloud.com/discover\":\n\t\tprint(\"you're now logged in\")\n\t\tloggin_indicator = True\n\nbrowser.maximize_window()\n\nautomatic_heart(genre_inp)\n\nprint(\"finished liking posts\")\n\n","repo_name":"mathew8e/crisp","sub_path":"soundcloudbot.py","file_name":"soundcloudbot.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"13238162807","text":"from __future__ import division\n#\n# Format Viewer, Marius TIVADAR, 2014\n#\n#\n\nfrom builtins import object\nfrom PyQt5 import QtGui\n\n\nclass SelectionType(object):\n    NORMAL = 0\n    PERMANENT = 1\n    TEXTHIGHLIGHT = 2\n\n\nclass Selection(object):\n    def __init__(self, themes, viewMode):\n        self.themes = themes\n        self.viewMode = viewMode\n        self.selecting = False\n        self.Selections = []\n        self.PermanentSelections = []\n        self.MAX_SELECTIONS = 
1\n        self.defaultBrush = QtGui.QBrush(self.themes['selection'])\n\n        self.last = 0\n        self.HighlightSelections = []\n\n    def drawSelection(self, qp, start, end, brush=None, opacity=0.4):\n        raise Exception(\"Not Implemented\")\n\n    def addSelection(self, t, seltype=None):\n        if len(t) == 4:\n            u, v, b, o = t\n        else:\n            u, v = t\n            b, o = None, None\n\n        if not o:\n            o = 0.4\n\n        if not b:\n            b = self.defaultBrush\n\n        if u - v == 0:\n            return\n\n        t = u, v, b, o\n\n        if seltype == SelectionType.NORMAL:\n            if len(self.Selections) >= self.MAX_SELECTIONS:\n                self.Selections = []\n\n            self.Selections.append(t)\n\n        if seltype == SelectionType.PERMANENT:\n            for w in self.PermanentSelections:\n                if t[0] == w[0] and t[1] == w[1]:\n                    return False\n\n            # u, v not found\n            self.PermanentSelections.append(t)\n\n        if seltype == SelectionType.TEXTHIGHLIGHT:\n            self.HighlightSelections.append(t)\n\n        return True\n\n    def removeSelection(self, u, v, seltype):\n        if seltype == SelectionType.NORMAL:\n            L = self.Selections\n\n        elif seltype == SelectionType.PERMANENT:\n            L = self.PermanentSelections\n\n        elif seltype == SelectionType.TEXTHIGHLIGHT:\n            L = self.HighlightSelections\n\n        else:\n            raise Exception(\"Selection type unknown\")\n\n        L[:] = [t for t in L if not (t[0] == u and t[1] == v)]\n        return\n\n    def drawSelections(self, qp):\n        # draw permanent\n        for t in self.PermanentSelections:\n            start, end, b, o = t\n            self.drawSelection(qp, start, end, brush=b, opacity=o)\n\n        # draw already selected\n        for t in self.Selections:\n            start, end, b, o = t\n            self.drawSelection(qp, start, end)\n\n        # \n        for t in self.HighlightSelections:\n            start, end, b, o = t\n            self.drawSelection(qp, start, end)\n        self.HighlightSelections = []\n\n        # draw current\n        if self.selecting:\n            self.drawSelection(qp, *self.getCurrentSelection())\n\n    def resetSelections(self):\n        self.Selections = []\n\n    def startSelection(self):\n        if not self.selecting:\n            self.selecting = True\n            self.selectionStartOffset = self.viewMode.getCursorAbsolutePosition()\n            if len(self.Selections) >= self.MAX_SELECTIONS:\n                self.Selections = []\n\n    def getCurrentSelection(self):\n        if self.selecting:\n            a = self.selectionStartOffset\n            b = self.viewMode.getCursorAbsolutePosition() + 1\n            if a < b:\n                return a, b\n            else:\n                return b, a\n        else:\n            for s in self.Selections:\n                u, v, b, o = s\n                # watch out! In theory we could have more than one normal selection,\n                # so here the first one is returned.\n                # but currently, by design we could only have one NORMAL selection\n                return u, v\n\n        # if self.last:\n        #    return self.last\n\n        return None\n\n    def stopSelection(self):\n        if self.selecting:\n            u, v = self.getCurrentSelection()\n\n            self.addSelection((u, v, QtGui.QBrush(self.themes['selection']), 0.4), seltype=SelectionType.NORMAL)\n            self.last = u, v\n\n            self.selecting = False\n            self.selectionStartOffset = None\n\n    def highlightText(self):\n        dataModel = self.viewMode.getDataModel()\n        page = self.viewMode.getDisplayablePage()\n\n        # for a search-in-page\n        t = self.getCurrentSelection()\n\n        if not t:\n            # no selection\n            return\n\n        start, end = t\n\n        if start == end:\n            return\n\n        text = dataModel.getStream(start, end)\n        Exclude = [start]\n\n        cols, rows = self.viewMode.getGeometry()\n\n        # find all occurrences\n        lenText = len(text)\n        M = []\n        idx = 0\n        if lenText > 0:\n            while idx < len(page):\n                idx = page.find(text, idx, len(page))\n\n                if idx == -1:\n                    break\n                M.append((idx, lenText))\n                idx += lenText\n\n        # Match = [(m.start(), m.end()) for m in re.finditer(bytes(text), bytes(page))]\n\n        for start, end in M:\n            # print start, end\n            # self._makeSelection(qp, start, end, cols, rows)\n            off = dataModel.getOffset()\n            if off + start not in Exclude:\n                # self._makeSelection(off + start, off + start + end, brush=QtGui.QBrush(QtGui.QColor(125, 255, 0)))\n                # self.viewMode.selector.addSelection((off+start, off + start + end, QtGui.QBrush(QtGui.QColor(125, 255, 0)), 0.4))\n                self.addSelection((off + start, off + start + end, QtGui.QBrush(self.themes['selection']), 0.4),\n                                  seltype=SelectionType.TEXTHIGHLIGHT)\n\n\nclass DefaultSelection(Selection):\n    def __init__(self, themes, viewMode):\n        super(DefaultSelection, self).__init__(themes, viewMode)\n        self.MAX_SELECTIONS = 1\n\n    def _makeSelection(self, qp, start, end, brush):\n        if not brush:\n            brush = QtGui.QBrush(self.themes['selection'])\n        dataModel = self.viewMode.getDataModel()\n        off = dataModel.getOffset()\n        length = len(self.viewMode.getDisplayablePage())\n        cols, rows = self.viewMode.getGeometry()\n\n        # return if out of view\n        if end < off:\n            return\n\n        if start > off + length:\n            return\n\n        if start < off:\n            d0 = 0\n        else:\n            d0 = start - off\n\n        if end > off + length:\n            d1 = length\n        else:\n            d1 = end - off\n\n        mark = True\n        height = 14\n\n        qp.setOpacity(0.4)\n        while mark:\n            if d0 // cols == d1 // cols:\n                qp.fillRect((d0 % cols) * 8, (d0 // cols) * height, (d1 - d0) * 8, 1 * height, brush)\n                d0 += (d1 - d0)\n            else:\n                qp.fillRect((d0 % cols) * 8, (d0 // cols) * height, (cols - d0 % cols) * 8, 1 * height, brush)\n                d0 += (cols - d0 % cols)\n\n            if d1 - d0 <= 0:\n                mark = False\n        qp.setOpacity(1)\n\n    def drawSelection(self, qp, start, end, brush=None, opacity=0.4):\n        if not brush:\n            brush = QtGui.QBrush(self.themes['selection'])\n\n        dataModel = self.viewMode.getDataModel()\n        off = dataModel.getOffset()\n        length = len(self.viewMode.getDisplayablePage())\n        cols, rows = self.viewMode.getGeometry()\n\n        # return if out of view\n        if end < off:\n            return\n\n        if start > off + length:\n            return\n\n        if start < off:\n            d0 = 0\n        else:\n            d0 = start - off\n\n        if end > off + length:\n            d1 = length\n        else:\n            d1 = end - off\n\n        mark = True\n        height = self.viewMode.fontHeight\n        width = self.viewMode.fontWidth\n\n        qp.setOpacity(opacity)\n\n        offset = 2\n\n        while mark:\n            if d0 // cols == d1 // cols:\n                qp.fillRect((d0 % cols) * width, (d0 // cols) * height + offset, (d1 - d0) * width, 1 * height, brush)\n                d0 
+= (d1 - d0)\n else:\n qp.fillRect((d0 % cols) * width, (d0 // cols) * height + offset, (cols - d0 % cols) * width, 1 * height,\n brush)\n d0 += (cols - d0 % cols)\n\n if d1 - d0 <= 0:\n mark = False\n qp.setOpacity(1)\n\n\nclass HexSelection(Selection):\n def __init__(self, themes, viewMode):\n super(HexSelection, self).__init__(themes, viewMode)\n self.MAX_SELECTIONS = 1\n\n def drawSelection(self, qp, start, end, brush=None, opacity=0.4):\n if not brush:\n brush = QtGui.QBrush(self.themes['selection'])\n\n dataModel = self.viewMode.getDataModel()\n off = dataModel.getOffset()\n length = len(self.viewMode.getDisplayablePage())\n cols, rows = self.viewMode.getGeometry()\n\n # return if out of view\n if end < off:\n return\n\n if start > off + length:\n return\n\n if start < off:\n d0 = 0\n else:\n d0 = start - off\n\n if end > off + length:\n d1 = length\n else:\n d1 = end - off\n\n mark = True\n height = self.viewMode.fontHeight\n width = self.viewMode.fontWidth\n\n qp.setOpacity(opacity)\n while mark:\n if d0 // cols == d1 // cols:\n # +2 is an offset for letters\n qp.fillRect(3 * (d0 % cols) * width, (d0 // cols) * height + 2, 3 * (d1 - d0) * width - width,\n 1 * height, brush)\n qp.fillRect(3 * cols * width + self.viewMode.gap * width + (d0 % cols) * width,\n (d0 // cols) * height + 2, (d1 - d0) * width, 1 * height, brush)\n\n d0 += (d1 - d0)\n else:\n qp.fillRect(3 * (d0 % cols) * width, (d0 // cols) * height + 2, 3 * (cols - d0 % cols) * width - width,\n 1 * height, brush)\n qp.fillRect(3 * cols * width + self.viewMode.gap * width + (d0 % cols) * width,\n (d0 // cols) * height + 2, (cols - d0 % cols) * width, 1 * height, brush)\n\n d0 += (cols - d0 % cols)\n\n if d1 - d0 <= 0:\n mark = False\n qp.setOpacity(1)\n\n\nclass DisasmSelection(Selection):\n def __init__(self, themes, viewMode):\n super(DisasmSelection, self).__init__(themes, viewMode)\n self.MAX_SELECTIONS = 1\n\n def drawSelection(self, qp, start, end, brush=None, opacity=0.4):\n if not brush:\n brush = QtGui.QBrush(self.themes['selection'])\n\n dataModel = self.viewMode.getDataModel()\n off = dataModel.getOffset()\n length = sum([o.size for o in self.viewMode.OPCODES]) # TODO: not nice!\n cols, rows = self.viewMode.getGeometry()\n\n # return if out of view\n if end < off:\n return\n\n if start > off + length:\n return\n\n if start < off:\n d0 = 0\n else:\n d0 = start - off\n\n if end > off + length:\n d1 = length\n else:\n d1 = end - off\n\n mark = True\n height = self.viewMode.fontHeight\n width = self.viewMode.fontWidth\n\n qp.setOpacity(opacity)\n\n offset = 2\n\n size = 0\n for i, asm in enumerate(self.viewMode.OPCODES):\n if size + asm.size > d0 and size <= d1:\n\n # compute x offset\n x = d0 - size\n if size > d0:\n x = 0\n\n # compute width\n w = asm.size\n if size + asm.size > d1:\n w = d1 - size\n\n qp.fillRect(x * 3 * width, i * height + offset, (w - x) * 3 * width - width, 1 * height, brush)\n\n size += asm.size\n\n qp.setOpacity(1)\n","repo_name":"amimo/dcc","sub_path":"androguard/gui/TextSelection.py","file_name":"TextSelection.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","stars":975,"dataset":"github-code","pt":"21"} +{"seq_id":"11913628417","text":"from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import permissions, filters\nfrom rest_framework.generics import CreateAPIView, ListAPIView, RetrieveUpdateDestroyAPIView\nfrom rest_framework.pagination import LimitOffsetPagination\n\nfrom goals.models import Comment\nfrom 
goals import serializers\nfrom goals.permissions import CommentPermissions\n\n\nclass CommentCreateView(CreateAPIView):\n model = Comment\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = serializers.CommentCreateSerializer\n\n def perform_create(self, serializer: serializers.CommentCreateSerializer) -> None:\n serializer.save(goal_id=self.request.data['goal'])\n\n\nclass CommentListView(ListAPIView):\n model = Comment\n permission_classes = [permissions.IsAuthenticated, CommentPermissions]\n serializer_class = serializers.CommentSerializer\n pagination_class = LimitOffsetPagination\n filter_backends = [filters.OrderingFilter, DjangoFilterBackend]\n filterset_fields = [\"goal\"]\n ordering = [\"-id\"]\n\n def get_queryset(self) -> Comment:\n return Comment.objects.filter(\n goal__category__board__participants__user=self.request.user\n )\n\n\nclass CommentView(RetrieveUpdateDestroyAPIView):\n model = Comment\n serializer_class = serializers.CommentSerializer\n permission_classes = [permissions.IsAuthenticated, CommentPermissions]\n\n def get_queryset(self) -> Comment:\n return Comment.objects.filter(\n goal__category__board__participants__user=self.request.user\n )\n","repo_name":"morley-d/todolist","sub_path":"goals/views/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19023968137","text":"from django.urls import path\n\nfrom .views import products_list, upload_product,product_detail,add_to_cart,edit_product_view\n\n\n\nurlpatterns = [\n path(\"products/upload\", upload_product, name=\"products_upload_view\"),\n path(\"products/list\", products_list, name=\"products_list_view\"),\n path(\"products/\", product_detail, name=\"product_detail_view\"),\n path(\"add_to_cart/\",add_to_cart,name='add_to_cart_view'),\n path(\"products/edit//\",edit_product_view,name = \"product_edit_view\"),\n]\n\n\n","repo_name":"EuniceMusenyia/GreenKiosk__Backend","sub_path":"inventory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17657515476","text":"class Solution:\n def pivotIndex(self, nums) -> int:\n sums = []\n sum_ = 0\n for num in nums:\n sum_ += num\n sums.append(sum_)\n #print(sums)\n N = len(nums)\n for i in range(N):\n if i == 0:\n lsum = 0\n rsum = sums[N-1]-sums[i]\n #print(lsum, rsum)\n elif i == N-1:\n lsum = sums[N-2]\n rsum = 0\n #print(lsum, rsum)\n else:\n lsum = sums[i-1]\n rsum = sums[N-1]-sums[i]\n #print(i, lsum, rsum)\n if lsum == rsum:\n return i\n return -1\n\nif __name__ == '__main__':\n sol = Solution()\n #nums = [1,7,3,6,5,6]\n #nums = [1,2,3]\n #nums = [2,1,-1]\n nums = [5]\n ret = sol.pivotIndex(nums)\n print(ret)","repo_name":"shiannn/LeetCodePython","sub_path":"724. Find Pivot Index.py","file_name":"724. 
Find Pivot Index.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10429948245","text":"from os import listdir\nfrom os.path import isfile, join\nmypath = 'seq'\nonlyfiles = [ f.split('.')[0] for f in listdir(mypath) if isfile(join(mypath,f)) ]\n\nimport subprocess\ncluster_prog = \"/s/bovine/e/nobackup/common/tools/cd-hit-v4.6.1-2012-08-27/cd-hit\"\nfor id in onlyfiles:\n    arg1 = \"../seq/%s.fa\" % id\n    arg2 = \"../cdhit/%s.out\" % id\n    subprocess.call([cluster_prog, \"-i\", arg1, \"-o\", arg2])\n","repo_name":"raymondr/amr-pipeline-tools","sub_path":"eachfile.py","file_name":"eachfile.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"43039653477","text":"def main():\n    n, k = map(int, input().split())\n    A = list(map(int, input().split()))\n    # prefix sums\n    A = [0] + A\n    n += 1\n    for i in range(1, len(A)):\n        A[i] += A[i-1]\n\n    num_list = []\n    for ri in range(n):\n        for le in range(ri):\n            # compute the sum over the interval [le, ri)\n            val = A[ri] - A[le]\n            num_list.append(val)\n\n    # choose k values from num_list and maximize their bitwise AND\n    max_num = max(num_list)\n    max_bit = 1\n    while 1 << max_bit <= max_num:\n        max_bit += 1\n\n    # consider each bit from the top; if at least k numbers have the bit set, add it to the answer\n    ans = 0\n    for bit in reversed(range(max_bit+1)):\n        cnt = calc_cnt(num_list, bit)\n        if cnt >= k:\n            ans += 1 << bit\n            remove_num(num_list, bit)\n    print(ans)\n\n\ndef calc_cnt(num_list, bit):\n    cnt = 0\n    for num in num_list:\n        if num & (1 << bit):\n            cnt += 1\n    return cnt\n\n\ndef remove_num(num_list, bit):\n    # drop the numbers whose bit is 0\n    for i, num in enumerate(num_list):\n        if num & (1 << bit) == 0:\n            num_list[i] = 0\n\n\nmain()\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/dowa_0312.py","file_name":"dowa_0312.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"42384358535","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 8 10:32:53 2018\n\n@author: truc\n\"\"\"\n\nimport glob\nimport os\nimport librosa\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import specgram\nfrom librosa.display import waveplot\nplt.close('all')\n\nsound_file_paths = \"a039_10_20_forest_path.wav\" #\"a038_30_40_home.wav\"\n\nparent_dir = 'small_data/'\n\nX,sr = librosa.load(os.path.join(parent_dir, sound_file_paths))\n\nS = librosa.feature.melspectrogram(X, sr=sr, n_mels=128)\n\n# Convert to log scale (dB). 
We'll use the peak power as reference.\nlog_S = librosa.logamplitude(S, ref_power=np.max)\n\n# Make a new figure\nplt.figure()\nlibrosa.display.waveplot(np.array(X),sr=22050,)\n\nplt.figure()\nlibrosa.display.specshow(log_S, sr=sr, x_axis='time', y_axis='mel')\n\nplt.colorbar(format='%+02.0f dB')\nplt.show()\n\n\nfrom sklearn.decomposition import NMF\nmodel = NMF(n_components=3, init='random', random_state=0)\nW = model.fit_transform(S)\nH = model.components_\nprint(W.shape)\nprint(H.shape)\n\nmodel = NMF(n_components=3, init='random', random_state=0)\nW1 = model.fit_transform(S.reshape(S.shape[1],S.shape[0]))\nH1 = model.components_\nprint(W1.shape)\nprint(H1.shape)\n\nfrom sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters=3, random_state=0).fit(log_S)\nkmeans.labels_\n\n\nkmeans1 = KMeans(n_clusters=3, random_state=0).fit(log_S.reshape(S.shape[1],S.shape[0]))\nkmeans1.labels_\n\ndd = np.zeros(log_S.shape)\ndd[np.where(kmeans.labels_==2)]=log_S[kmeans.labels_==2]\nplt.figure()\nlibrosa.display.specshow(dd, sr=sr, x_axis='time', y_axis='mel')\n\nplt.figure()\nres = kmeans.cluster_centers_[kmeans.labels_.flatten()]\nres2 = res.reshape((log_S.shape))\nplt.figure()\nlibrosa.display.specshow(res2, sr=sr, x_axis='time', y_axis='mel')\nplt.show\n","repo_name":"nguyenthikimtruc/asc_","sub_path":"test_sing_NFM.py","file_name":"test_sing_NFM.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23589823497","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, MaxPool2d, \\\n AdaptiveAvgPool2d, Sequential, Module\nfrom collections import namedtuple\n\nfrom .helpers import named_apply\nfrom .layers import PatchEmbed, DropPath, trunc_normal_, lecun_normal_\nfrom functools import partial\nimport math\nfrom einops import rearrange\nfrom models import layers\n\n\nclass Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\ndef l2_norm(input, axis=1):\n norm = torch.norm(input, 2, axis, True)\n output = torch.div(input, norm)\n\n return output\n\nclass SEModule(Module):\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = AdaptiveAvgPool2d(1)\n self.fc1 = Conv2d(\n channels, channels // reduction, kernel_size=1, padding=0, bias=False)\n\n nn.init.xavier_uniform_(self.fc1.weight.data)\n\n self.relu = ReLU(inplace=True)\n self.fc2 = Conv2d(\n channels // reduction, channels, kernel_size=1, padding=0, bias=False)\n\n self.sigmoid = Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n\n return module_input * x\n\nclass bottleneck_IR(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR, self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n\n return res + shortcut\n\nclass bottleneck_IR_SE(Module):\n def __init__(self, in_channel, depth, stride):\n super(bottleneck_IR_SE, 
self).__init__()\n if in_channel == depth:\n self.shortcut_layer = MaxPool2d(1, stride)\n else:\n self.shortcut_layer = Sequential(\n Conv2d(in_channel, depth, (1, 1), stride, bias=False),\n BatchNorm2d(depth))\n self.res_layer = Sequential(\n BatchNorm2d(in_channel),\n Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n PReLU(depth),\n Conv2d(depth, depth, (3, 3), stride, 1, bias=False),\n BatchNorm2d(depth),\n SEModule(depth, 16)\n )\n\n def forward(self, x):\n shortcut = self.shortcut_layer(x)\n res = self.res_layer(x)\n\n return res + shortcut\n\nclass Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):\n '''A named tuple describing a ResNet block.'''\n\ndef get_block(in_channel, depth, num_units, stride=2):\n\n return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]\n\ndef get_blocks(num_layers):\n if num_layers == 50:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=4),\n get_block(in_channel=128, depth=256, num_units=14),\n # get_block(in_channel=256, depth=512, num_units=3)\n ]\n elif num_layers == 100:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=13),\n get_block(in_channel=128, depth=256, num_units=30),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n elif num_layers == 152:\n blocks = [\n get_block(in_channel=64, depth=64, num_units=3),\n get_block(in_channel=64, depth=128, num_units=8),\n get_block(in_channel=128, depth=256, num_units=36),\n get_block(in_channel=256, depth=512, num_units=3)\n ]\n\n return blocks\n\nclass Backbone(Module):\n def __init__(self, input_size, num_layers, mode='ir'):\n super(Backbone, self).__init__()\n assert input_size[0] in [112, 224], \"input_size should be [112, 112] or [224, 224]\"\n assert num_layers in [50, 100, 152], \"num_layers should be 50, 100 or 152\"\n assert mode in ['ir', 'ir_se'], \"mode should be ir or ir_se\"\n blocks = get_blocks(num_layers)\n\n if mode == 'ir':\n unit_module = bottleneck_IR\n elif mode == 'ir_se':\n unit_module = bottleneck_IR_SE\n self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),\n BatchNorm2d(64),\n PReLU(64))\n # if input_size[0] == 112:\n # self.output_layer = Sequential(BatchNorm2d(512),\n # Dropout(),\n # Flatten(),\n # Linear(512 * 7 * 7, 512),\n # BatchNorm1d(512))\n # else:\n # self.output_layer = Sequential(BatchNorm2d(512),\n # Dropout(),\n # Flatten(),\n # Linear(512 * 14 * 14, 512),\n # BatchNorm1d(512))\n\n modules = []\n for block in blocks:\n for bottleneck in block:\n modules.append(\n unit_module(bottleneck.in_channel,\n bottleneck.depth,\n bottleneck.stride))\n self.body = Sequential(*modules)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.input_layer(x)\n x = self.body(x)\n # x = self.output_layer(x)\n\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n 
head_dim = dim // num_heads\n self.scale = head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.in_features = in_features\n if in_features == 512:\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n else:\n self.fc1 = nn.Conv2d(in_features, hidden_features, 1)\n self.bn1 = nn.BatchNorm2d(hidden_features)\n self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, padding=1, groups=hidden_features)\n self.bn2 = nn.BatchNorm2d(hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Conv2d(hidden_features, out_features, 1)\n self.bn3 = nn.BatchNorm2d(out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n if self.in_features == 512:\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n else:\n B,N,C = x.shape\n x = x.reshape(B, int(N**0.5), int(N**0.5), C).permute(0,3,1,2)\n x = self.bn1(self.fc1(x))\n x = self.act(x)\n x = self.drop(x)\n x = self.act(self.bn2(self.dwconv(x)))\n x = self.bn3(self.fc2(x))\n x = self.drop(x)\n x = x.permute(0,2,3,1).reshape(B, -1, C)\n return x\n\nclass Block(nn.Module):\n def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\nclass Transformer(nn.Module):\n def __init__(self, base_dim, depth, heads, mlp_ratio,\n drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None, use_mask=False, masked_block=None):\n super(Transformer, self).__init__()\n self.layers = nn.ModuleList([])\n self.depth = depth\n embed_dim = base_dim * heads\n\n if drop_path_prob is None:\n drop_path_prob = [0.0 for _ in range(depth)]\n\n if use_mask==True:\n assert masked_block is not None\n self.blocks = nn.ModuleList()\n for i in range(depth):\n if i < masked_block:\n self.blocks.append(Block(\n dim=embed_dim,\n num_heads=heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=True,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=drop_path_prob[i],\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n use_mask=use_mask\n ))\n else:\n self.blocks.append(Block(\n dim=embed_dim,\n num_heads=heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=True,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=drop_path_prob[i],\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n use_mask=False\n ))\n else:\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim,\n num_heads=heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=True,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=drop_path_prob[i],\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n )\n for i in range(depth)])\n\n\n def forward(self, x):\n B,C,H,W = x.shape\n x = rearrange(x, 'b c h w -> b (h w) c')\n # x = x.permute(0,2,3,1).reshape(B, H * W, C)\n for i in range(self.depth):\n x = self.blocks[i](x)\n # x = x.reshape(B, H, W, C).permute(0,3,1,2)\n x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W)\n return x\n\nclass conv_head_pooling(nn.Module):\n def __init__(self, in_feature, out_feature, stride,\n padding_mode='zeros'):\n super(conv_head_pooling, self).__init__()\n\n self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=stride + 1,\n padding=stride // 2, stride=stride,\n padding_mode=padding_mode, groups=in_feature)\n\n def forward(self, x):\n\n x = self.conv(x)\n\n return x\n\nclass PosConv(nn.Module):\n # PEG from https://arxiv.org/abs/2102.10882\n def __init__(self, in_chans, embed_dim=512, stride=1):\n super(PosConv, self).__init__()\n self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), )\n self.stride = stride\n\n def forward(self, x):#, H, W\n # B, N, C = x.shape\n # cnn_feat_token = x.transpose(1, 2).view(B, C, H, W)\n cnn_feat_token = x\n x = self.proj(cnn_feat_token)\n if self.stride == 1:\n x += cnn_feat_token\n # x = x.flatten(2).transpose(1, 2)\n return x\n\n def no_weight_decay(self):\n return ['proj.%d.weight' % i for i in range(4)]\n\nclass PoolingTransformer(nn.Module):\n def __init__(self, base_dims, depth, heads,\n mlp_ratio, num_classes=7,\n attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0, use_mask=False, masked_block=None,num_AU_patch=4):\n super(PoolingTransformer, self).__init__()\n\n total_block = sum(depth)\n block_idx = 0\n\n self.base_dims = base_dims\n self.heads = heads\n self.num_classes = num_classes\n\n self.pos_embed = nn.Parameter(torch.zeros(1, 14*14 , base_dims[0] * heads[0]))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n self.transformers = nn.ModuleList([])\n self.pools = nn.ModuleList([])\n 
self.pos_block = nn.ModuleList([])\n self.num_AU_patch=num_AU_patch\n self.patch_embed=nn.Conv2d(256, base_dims[0] * heads[0], 3, padding=1)\n\n for stage in range(len(depth)):\n drop_path_prob = [drop_path_rate * i / total_block\n for i in range(block_idx, block_idx + depth[stage])]\n block_idx += depth[stage]\n self.pos_block.append(\n PosConv(base_dims[stage] * heads[stage], base_dims[stage] * heads[stage])\n )\n self.transformers.append(\n Transformer(base_dims[stage], depth[stage], heads[stage],\n mlp_ratio,\n drop_rate, attn_drop_rate, drop_path_prob)\n )\n if stage < len(heads) - 1:\n self.pools.append(\n conv_head_pooling(base_dims[stage] * heads[stage],\n base_dims[stage + 1] * heads[stage + 1],\n stride=2\n )\n )\n\n self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)\n self.embed_dim = base_dims[-1] * heads[-1]\n if num_AU_patch==7:\n self.gap_AU11 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU12 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU13 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU21 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU22 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU23 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU3 = nn.AdaptiveAvgPool2d(1)\n self.norm_AU11 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU12 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU13 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU21 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU22 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU23 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU3 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.head_AU11= nn.Linear(self.embed_dim, 4)\n self.head_AU12= nn.Linear(self.embed_dim, 1)\n self.head_AU13= nn.Linear(self.embed_dim, 4)\n self.head_AU21= nn.Linear(self.embed_dim, 1)\n self.head_AU22= nn.Linear(self.embed_dim, 1)\n self.head_AU23= nn.Linear(self.embed_dim, 1)\n self.head_AU3= nn.Linear(self.embed_dim, 14)\n elif num_AU_patch==5:\n self.gap_AU1 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU21 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU22 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU23 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU3 = nn.AdaptiveAvgPool2d(1)\n self.norm_AU1 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU21 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU22 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU23 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU3 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.head_AU1= nn.Linear(self.embed_dim, 5)\n self.head_AU21= nn.Linear(self.embed_dim, 1)\n self.head_AU22= nn.Linear(self.embed_dim, 1)\n self.head_AU23= nn.Linear(self.embed_dim, 1)\n self.head_AU3= nn.Linear(self.embed_dim, 14)\n elif num_AU_patch==4:\n self.gap_AU1 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU21 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU23 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU3 = nn.AdaptiveAvgPool2d(1)\n self.norm_AU1 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU21 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU23 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU3 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.head_AU1= nn.Linear(self.embed_dim, 4)\n self.head_AU21= nn.Linear(self.embed_dim, 1)\n self.head_AU23= nn.Linear(self.embed_dim, 1)\n self.head_AU3= nn.Linear(self.embed_dim, 7)\n\n elif num_AU_patch==3:\n self.gap_AU1 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU2 = nn.AdaptiveAvgPool2d(1)\n self.gap_AU3 = nn.AdaptiveAvgPool2d(1)\n self.norm_AU1 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU2 = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU3 = 
nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.head_AU1= nn.Linear(self.embed_dim, 5)\n self.head_AU2= nn.Linear(self.embed_dim, 2)\n self.head_AU3= nn.Linear(self.embed_dim, 14)\n elif num_AU_patch==2:\n self.gap_upperAU = nn.AdaptiveAvgPool2d(1)\n self.gap_lowerAU = nn.AdaptiveAvgPool2d(1)\n self.norm_AU_up = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.norm_AU_low = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.upperAU_head= nn.Linear(512, 7)\n self.lowerAU_head= nn.Linear(512, 9)\n elif num_AU_patch==1:\n self.gap_AU = nn.AdaptiveAvgPool2d(1)\n self.norm_AU = nn.LayerNorm(self.embed_dim, eps=1e-6)\n self.AU_head= nn.Linear(self.embed_dim, 21)\n\n \n self.gap = nn.AdaptiveAvgPool2d(1)\n self.output_layer = Sequential( Dropout(),\n Flatten(),\n Linear(base_dims[-1] * heads[-1] * 14 * 14, base_dims[-1] * heads[-1]),\n nn.ReLU())\n # Classifier head\n if num_classes > 0:\n self.head = nn.Linear(base_dims[-1] * heads[-1], num_classes)\n else:\n self.head = nn.Identity()\n\n self.apply(self._init_weights)\n trunc_normal_(self.pos_embed, std=.02)\n\n def _init_weights(self, m):\n if isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n def learnable_PosEmbed(self,x):\n B,C,H,W = x.shape\n x = rearrange(x, 'b c h w -> b (h w) c')\n x = x + self.pos_embed\n x = rearrange(x, 'b (h w) c -> b c h w', h=H, w=W)\n return x\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed'}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n if num_classes > 0:\n self.head = nn.Linear(self.embed_dim, num_classes)\n else:\n self.head = nn.Identity()\n\n def AUBranch(self,AU_x):\n B,C,H,W = AU_x.shape\n if self.num_AU_patch==7:\n AU11=AU_x[:,:, :7 , :7 ]\n AU12=AU_x[:,:, :7 ,4:10]\n AU13=AU_x[:,:, :7 ,7: ]\n AU21=AU_x[:,:,5:12, :6 ]\n AU22=AU_x[:,:,4:10,4:10]\n AU23=AU_x[:,:,5:12,8: ]\n AU3 =AU_x[:,:,6: , : ]\n AU11=self.head_AU11(self.norm_AU11(self.gap_AU11(AU11).squeeze()))\n AU12=self.head_AU12(self.norm_AU12(self.gap_AU12(AU12).squeeze()))\n AU13=self.head_AU13(self.norm_AU13(self.gap_AU13(AU13).squeeze()))\n AU21=self.head_AU21(self.norm_AU21(self.gap_AU21(AU21).squeeze()))\n AU22=self.head_AU22(self.norm_AU22(self.gap_AU22(AU22).squeeze()))\n AU23=self.head_AU23(self.norm_AU23(self.gap_AU23(AU23).squeeze()))\n AU3 =self.head_AU3(self.norm_AU3(self.gap_AU3(AU3).squeeze()))\n AU1257=torch.maximum(AU11,AU13)\n AU6=torch.maximum(AU21,AU23)\n AU_all=torch.cat((AU1257[:,:2],AU12,AU1257[:,2].view(B,-1),AU6,AU1257[:,3].view(B,-1),AU22,AU3),dim=1)\n elif self.num_AU_patch==5:\n AU1 =AU_x[:,:, :7 , : ]\n AU21=AU_x[:,:,5:12, :6 ]\n AU22=AU_x[:,:,4:10,4:10]\n AU23=AU_x[:,:,5:12,8: ]\n AU3 =AU_x[:,:,6: , : ]\n AU1=self.head_AU1(self.norm_AU1(self.gap_AU1(AU1).squeeze()))\n AU21=self.head_AU21(self.norm_AU21(self.gap_AU21(AU21).squeeze()))\n AU22=self.head_AU22(self.norm_AU22(self.gap_AU22(AU22).squeeze()))\n AU23=self.head_AU23(self.norm_AU23(self.gap_AU23(AU23).squeeze()))\n AU3 =self.head_AU3(self.norm_AU3(self.gap_AU3(AU3).squeeze()))\n AU6=torch.maximum(AU21,AU23)\n AU_all=torch.cat((AU1[:,:4],AU6,AU1[:,4].view(B,-1),AU22,AU3),dim=1)\n elif self.num_AU_patch==4:\n AU1 =AU_x[:,:, :7 , : ]\n AU21=AU_x[:,:,5:12, :6 ]\n AU23=AU_x[:,:,5:12,8: ]\n AU3 =AU_x[:,:,6: , : ]\n AU1=self.head_AU1(self.norm_AU1(self.gap_AU1(AU1).squeeze()))\n AU21=self.head_AU21(self.norm_AU21(self.gap_AU21(AU21).squeeze()))\n 
AU23=self.head_AU23(self.norm_AU23(self.gap_AU23(AU23).squeeze()))\n AU3 =self.head_AU3(self.norm_AU3(self.gap_AU3(AU3).squeeze()))\n AU6=torch.maximum(AU21,AU23)\n AU_all=torch.cat((AU1[:,:3],AU6,AU1[:,3].view(B,-1),AU3),dim=1)\n\n elif self.num_AU_patch==3:\n AU1=AU_x[:,:, :7 ,: ]\n AU2=AU_x[:,:,4:12,: ]\n AU3=AU_x[:,:,6: ,: ]\n AU1=self.head_AU1(self.norm_AU1(self.gap_AU1(AU1).squeeze()))\n AU2=self.head_AU2(self.norm_AU2(self.gap_AU2(AU2).squeeze()))\n AU3=self.head_AU3(self.norm_AU3(self.gap_AU3(AU3).squeeze()))\n AU_all=torch.cat((AU1[:,:4],AU2[:,0].view(B,-1),AU1[:,4].view(B,-1),AU2[:,1].view(B,-1),AU3),dim=1)\n elif self.num_AU_patch==2:\n upper_AU=AU_x[:,:, :8 ,:]\n lower_AU=AU_x[:,:, 6: ,:]\n upperAU = self.upperAU_head(self.norm_AU_up(self.gap_upperAU(upper_AU).squeeze()))\n lowerAU = self.lowerAU_head(self.norm_AU_low(self.gap_lowerAU(lower_AU).squeeze()))\n AU_all=torch.cat((upperAU, lowerAU), dim=1)\n elif self.num_AU_patch==1:\n AU_all=self.AU_head(self.norm_AU(self.gap_AU(AU_x).squeeze()))\n return AU_all\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n x = self.learnable_PosEmbed(x)\n x = self.pos_drop(x)\n for stage in range(len(self.pools)):\n # x = self.pos_block[stage](x)\n x = self.transformers[stage](x)\n # x = self.pools[stage](x)\n AU_x = x\n AU_output = self.AUBranch(AU_x)\n x = self.transformers[-1](x)\n cls_features = self.norm(self.gap(x).squeeze())\n # cls_features = self.norm(self.output_layer(x))\n return cls_features,AU_output\n\n def forward(self, x):\n cls_features,AU_output = self.forward_features(x)\n output = self.head(cls_features)\n return output,AU_output\n\nclass IR50_ViT(nn.Module):\n def __init__(self, num_classes=7,ir_50_pth=None,num_AU_patch=7):\n super(IR50_ViT, self).__init__()\n\n self.embed_dim = 256\n self.num_patch = 196\n\n self.cnn = self.IR_50([112,112])\n # print(self.cnn)\n if ir_50_pth:\n self.cnn=self.load_model_weights(self.cnn,ir_50_pth)\n self.norm = nn.LayerNorm([self.num_patch, self.embed_dim])\n\n self.vit = PoolingTransformer(\n num_classes=num_classes,\n base_dims=[32, 32],\n depth=[4, 2],\n heads=[16, 16],\n mlp_ratio=4,\n num_AU_patch=num_AU_patch)\n \n def forward(self,x):\n x=self.cnn(x) # IR-50\n # x = x.flatten(2).transpose(1, 2) # BCHW -> BNC (flatten from H)\n # x = self.norm(x) # LayerNorm\n x = self.vit(x) # ViT\n return x\n def load_model_weights(self,model,model_path):\n state_dict = torch.load(model_path)\n # create new OrderedDict that does not contain `module.`\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n if k.startswith( 'output_layer' ):\n continue\n elif k.startswith('body'):\n k = k.strip()\n layer = k.split('.')[1]\n if int(layer)>20:\n continue\n new_state_dict[k] = v\n # load params\n model.load_state_dict(new_state_dict,strict=True)#使用strict=True,因为没有增加层,只是删减\n return model\n\n def IR_50(self,input_size):\n \"\"\"Constructs a ir-50 model.\n \"\"\"\n model = Backbone(input_size, 50, 'ir')\n\n return model\n","repo_name":"msy1412/ABAW4","sub_path":"models/IR_RVT_AU_plus_patch.py","file_name":"IR_RVT_AU_plus_patch.py","file_ext":"py","file_size_in_byte":27115,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"5784584465","text":"while True:\r\n n = int(input())\r\n if not n:\r\n break\r\n\r\n names = []\r\n for i in range(n):\r\n names.append(input())\r\n\r\n names.sort(key=lambda x: x[:2])\r\n print('\\n'.join(names) + 
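A side note on the Attention module in the ViT record above: the qkv layer computes all three projections in one matmul, and the reshape/permute pair splits them into per-head tensors. A minimal standalone sketch of the same computation, with illustrative sizes rather than the record's actual configuration:

import torch

B, N, dim, num_heads = 2, 196, 512, 16
head_dim = dim // num_heads
x = torch.randn(B, N, dim)
qkv_proj = torch.nn.Linear(dim, dim * 3)

# (B, N, 3*dim) -> (3, B, heads, N, head_dim), matching the record's permute(2, 0, 3, 1, 4)
qkv = qkv_proj(x).reshape(B, N, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]

attn = (q @ k.transpose(-2, -1)) * head_dim ** -0.5   # (B, heads, N, N) attention scores
out = (attn.softmax(dim=-1) @ v).transpose(1, 2).reshape(B, N, dim)
assert out.shape == (B, N, dim)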
'\\n')\r\n","repo_name":"JohnVicke/Kattis","sub_path":"sortin2.py","file_name":"sortin2.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72519071093","text":"from SublimeLinter.lint import Linter\n\nfrom .linters import python as linter_python\nfrom .parsers.recursive_descent import RecursiveDescent\n\nclass SublimeLinterParser(Linter):\n cmd = None # Tell SublimeLinter to call the run() method instead of running an external tool\n\n # Match all views\n defaults = {\n 'selector': 'source.python'\n }\n\n def run(self, cmd, code):\n print(\"---- RUN ---- \")\n parser = RecursiveDescent(linter_python)\n tree = parser.parse(code)\n if tree is None:\n print(\"tree = None\")\n print(\"---- END RUN ----\")\n\ndef plugin_loaded():\n print(\"Loaded!\")\n","repo_name":"bfoz/SublimeLinter-parser","sub_path":"linter.py","file_name":"linter.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23097887063","text":"import torch.utils.data\nimport numpy as np\nimport json\n#from skimage import io\n#from skimage import draw\n#import skimage.transform as sktransform\nimport os\nimport math, random, string, re\nfrom collections import defaultdict, OrderedDict\nimport timeit\nfrom data_sets.qa import QADataset, collate\n\nimport utils.img_f as img_f\n\n\nclass RVLCDIPClass(QADataset):\n \"\"\"\n Document classification on RVL-CDIP dataset\n \"\"\"\n\n\n def __init__(self, dirPath=None, split=None, config=None, images=None):\n super(RVLCDIPClass, self).__init__(dirPath,split,config,images)\n\n self.do_masks=True\n self.cased = True\n\n if split=='valid':\n split = 'val'\n\n self.str_lookup=[\n 'letter',\n 'form',\n 'email',\n 'handwritten',\n 'advertisement',\n 'scientific_report',\n 'scientific_publication',\n 'specification',\n 'file_folder',\n 'news_article',\n 'budget',\n 'invoice',\n 'presentation',\n 'questionnaire',\n 'resume',\n 'memo',]\n self.str_lookup = ['C:'+cls for cls in self.str_lookup]\n\n split_file = os.path.join(dirPath,'labels',f'{split}.txt')\n self.images=[]\n with open(split_file) as f:\n for line in f.readlines():\n path, cls = line.strip().split(' ')\n path = os.path.join(dirPath,'images',path)\n cls = int(cls)\n qa=[]\n self.qaAdd(qa,'classify>',self.str_lookup[cls])\n self.images.append({'imageName':path,'imagePath':path,'annotationPath':cls,'qa':qa})\n\n\n if split=='val':\n #takes too long to go through whole val set during training, shorten. 
1/5?\n self.images = self.images[::5]\n\n\n\n\n def parseAnn(self,class_index,s):\n class_str = self.str_lookup[class_index]\n qas=[]\n self.qaAdd(qas,'classify>',class_str)\n return None,None,None,None, qas\n\n","repo_name":"herobd/dessurt","sub_path":"data_sets/rvl_cdip_class.py","file_name":"rvl_cdip_class.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"21"} +{"seq_id":"1178693838","text":"import sys\r\nimport sqlite3\r\nimport os\r\n\r\nimport random\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5 import QtGui, QtWidgets, uic, QtCore, QtMultimedia\r\n\r\nfrom log import StartPage\r\nfrom track_widget import TrackWidget\r\n\r\n\r\nclass MainWindow(QtWidgets.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n # инициализация и стиили окна\r\n\r\n # безрамочное окно\r\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\r\n\r\n self.id = False\r\n\r\n self.hide()\r\n\r\n #! режим (почти)разработчика (если False)\r\n if 1:\r\n\r\n startpage = StartPage(self)\r\n startpage.exec()\r\n\r\n if self.id is False:\r\n sys.exit()\r\n\r\n # id пользователя\r\n\r\n # загрузка .ui и .db файлов\r\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n\r\n ui_path = os.path.join(BASE_DIR, \"ui/main.ui\")\r\n\r\n db_path = os.path.join(BASE_DIR, \"database/music_db.db\")\r\n\r\n uic.loadUi(ui_path, self)\r\n\r\n # sqlite3\r\n\r\n self.con = sqlite3.connect(db_path)\r\n\r\n self.cur = self.con.cursor()\r\n\r\n # стили и подключение кнопки выхода и сворачивания окна\r\n\r\n self.exit.setIcon(QtGui.QIcon('icons/X.png'))\r\n self.exit.clicked.connect(self.close_programm)\r\n\r\n self.role_up.setIcon(QtGui.QIcon('icons/role_up.png'))\r\n self.role_up.clicked.connect(self.role_up_programm)\r\n\r\n # стили и подключение кнопок боковой панели\r\n\r\n self.home.setIcon(QtGui.QIcon('icons/home.png'))\r\n self.home.setIconSize(QtCore.QSize(35, 35))\r\n\r\n self.search.setIcon(QtGui.QIcon('icons/search.png'))\r\n self.search.setIconSize(QtCore.QSize(30, 30))\r\n\r\n self.my_music.setIcon(QtGui.QIcon('icons/mymusic.png'))\r\n self.my_music.setIconSize(QtCore.QSize(30, 30))\r\n\r\n self.home.clicked.connect(self.change_tab)\r\n\r\n self.search.clicked.connect(self.change_tab)\r\n\r\n self.my_music.clicked.connect(self.change_tab)\r\n\r\n #! Создание плеера\r\n self.player = QtMultimedia.QMediaPlayer()\r\n\r\n self.current_track = False\r\n self.player.durationChanged.connect(self.change_duration)\r\n self.player.positionChanged.connect(self.position_changed)\r\n self.slider.valueChanged.connect(self.end_of_track)\r\n self.slider.sliderMoved.connect(self.set_position)\r\n self.sound.setMaximum(100)\r\n self.sound.setValue(50)\r\n self.sound.valueChanged.connect(self.sound_changed)\r\n self.player.setVolume(self.sound.value())\r\n self.pause = True\r\n\r\n self.slider.setEnabled(False)\r\n\r\n # стили и подключение кнопок нижней панели\r\n self.record.setIcon(QtGui.QIcon('icons/record.png'))\r\n\r\n self.previous_track.setIcon(QtGui.QIcon('icons/previous.png'))\r\n self.previous_track.clicked.connect(self.previous)\r\n\r\n self.play_pause.setIcon(QtGui.QIcon('icons/play.png'))\r\n self.play_pause.clicked.connect(self.play_pause_func)\r\n\r\n self.next_track.setIcon(QtGui.QIcon('icons/next.png'))\r\n self.next_track.clicked.connect(self.next)\r\n\r\n self.tab1_create()\r\n self.tab2_create()\r\n self.tab3_create()\r\n\r\n #! 
TAB 1\r\n\r\n # tab1 по умолчанию\r\n self.tab.setCurrentWidget(self.tab1)\r\n\r\n def tab1_create(self):\r\n\r\n self.scroll.verticalScrollBar().setStyleSheet('width: 0px;')\r\n self.scroll.setWidgetResizable(True)\r\n\r\n self.widget = QtWidgets.QWidget(self)\r\n\r\n self.layout = QtWidgets.QVBoxLayout()\r\n self.layout.addSpacing(10)\r\n\r\n self.widget.setLayout(self.layout)\r\n\r\n result = self.cur.execute('''SELECT id FROM all_music''').fetchall()\r\n\r\n # создание TrackWidget для рандомных треков\r\n lst = []\r\n self.firts_tab_result = []\r\n while len(lst) < 10:\r\n num = [*random.choice(list(result))][0]\r\n\r\n if not num in lst:\r\n track = TrackWidget(num, self)\r\n track.setParent(self.widget)\r\n track.setFixedSize(412, 74)\r\n self.layout.addWidget(track)\r\n lst.append(num)\r\n self.firts_tab_result.append(track)\r\n\r\n self.scroll.setWidget(self.widget)\r\n self.scroll.setWidgetResizable(True)\r\n\r\n #! TAB 2\r\n\r\n def tab2_create(self):\r\n\r\n # спрятывание scrollbar'a\r\n self.search_scroll.verticalScrollBar().setStyleSheet('width: 0px;')\r\n\r\n self.search_scroll.setWidgetResizable(True)\r\n\r\n self.search_btn.setStyleSheet(\r\n 'border-radius: 10px; background-color: #1db954;')\r\n\r\n self.search_widget = QtWidgets.QWidget()\r\n\r\n self.search_layout = QtWidgets.QVBoxLayout()\r\n\r\n # не дает виджетам накладываться друг на друга\r\n self.search_layout.addSpacing(10)\r\n\r\n # получение запроса из поисковой строки\r\n self.search_widget.setLayout(self.search_layout)\r\n\r\n # отправление запроса в функцию self5.show_result\r\n self.search_btn.clicked.connect(self.show_result)\r\n\r\n #! TAB3\r\n def tab3_create(self):\r\n self.my_music_scroll.verticalScrollBar().setStyleSheet('width: 0px;')\r\n self.my_music_scroll.setWidgetResizable(True)\r\n\r\n my_music_widget = QtWidgets.QWidget(self)\r\n my_music_layout = QtWidgets.QVBoxLayout()\r\n my_music_layout.addSpacing(10)\r\n\r\n my_music_widget.setLayout(my_music_layout)\r\n\r\n command = f'''SELECT music_id FROM 'user_{self.id}' '''\r\n result = self.cur.execute(command).fetchall()\r\n self.second_tab_result = []\r\n for i in result:\r\n music_id = i[0]\r\n track = TrackWidget(music_id, self)\r\n track.setFixedSize(412, 74)\r\n my_music_layout.addWidget(track)\r\n self.second_tab_result.append(track)\r\n self.my_music_scroll.setWidget(my_music_widget)\r\n\r\n # выводит на tab2 все треки подходящие по запросу\r\n\r\n def show_result(self):\r\n\r\n # предварительное удаление всех виджетов с tab2.search_scroll\r\n if self.search_layout.count() > 1:\r\n for i in reversed(range(1, self.search_layout.count())):\r\n self.search_layout.itemAt(i).widget().setParent(None)\r\n\r\n # запрос для базы данных\r\n response = self.search_line.text()\r\n result = self.cur.execute(\r\n f'''SELECT id FROM all_music WHERE title LIKE '%{response}%' or author LIKE '%{response}%' ''').fetchall()\r\n self.third_tab_result = []\r\n # добавление результатов на tab2.search_scroll\r\n for i in result:\r\n\r\n track = TrackWidget(i[0], self)\r\n track.setFixedSize(412, 74)\r\n self.search_layout.addWidget(track)\r\n self.third_tab_result.append(track)\r\n\r\n self.search_scroll.setWidget(self.search_widget)\r\n self.search_scroll.setWidgetResizable(True)\r\n\r\n # переключает TabWidget между tab1, tab2, tab3\r\n def change_tab(self):\r\n\r\n if self.sender() == self.home:\r\n self.tab.setCurrentWidget(self.tab1)\r\n self.tab1_create()\r\n\r\n if self.sender() == self.search:\r\n self.tab.setCurrentWidget(self.tab2)\r\n\r\n if 
self.sender() == self.my_music:\r\n self.tab.setCurrentWidget(self.tab3)\r\n\r\n def play_pause_func(self):\r\n\r\n if self.pause:\r\n self.play_func()\r\n else:\r\n self.pause_func()\r\n\r\n def play_func(self):\r\n self.player.setPosition(self.position)\r\n self.player.play()\r\n self.pause = False\r\n\r\n def pause_func(self):\r\n self.position = self.slider.value()\r\n self.player.stop()\r\n self.slider.setSliderPosition(self.position)\r\n self.pause = True\r\n\r\n def change_duration(self, duration_ms):\r\n self.slider.setMaximum(duration_ms)\r\n\r\n def set_position(self, pos):\r\n self.player.setPosition(pos)\r\n\r\n def position_changed(self, pos):\r\n self.slider.setValue(pos)\r\n\r\n def previous(self):\r\n\r\n par = self.current_track.parent()\r\n slide = par.children()\r\n index = slide.index(self.current_track)\r\n if index > 1:\r\n previous_track = slide[index - 1]\r\n previous_track.play()\r\n else:\r\n previous_track = slide[-1]\r\n previous_track.play()\r\n\r\n\r\n def next(self):\r\n\r\n par = self.current_track.parent()\r\n slide = par.children()\r\n index = slide.index(self.current_track)\r\n if len(slide) - 1 > index > 0:\r\n next_track = slide[index + 1]\r\n next_track.play()\r\n else:\r\n next_track = slide[1]\r\n next_track.play()\r\n\r\n def sound_changed(self):\r\n self.player.setVolume(self.sound.value())\r\n\r\n def end_of_track(self):\r\n if self.player.position() == self.slider.maximum():\r\n self.next()\r\n\r\n #! НЕ ТРОЖБ~\r\n # позволяет перемещать безрамочное окно\r\n\r\n #! --------------------------------\r\n def mousePressEvent(self, event):\r\n if event.button() == Qt.LeftButton:\r\n self.old_pos = event.pos()\r\n\r\n def mouseReleaseEvent(self, event):\r\n if event.button() == Qt.LeftButton:\r\n self.old_pos = None\r\n\r\n def mouseMoveEvent(self, event):\r\n if not self.old_pos:\r\n return\r\n delta = event.pos() - self.old_pos\r\n self.move(self.pos() + delta)\r\n #! 
--------------------------------\r\n\r\n    # closing and minimizing the program\r\n    def close_programm(self):\r\n        sys.exit()\r\n\r\n    def role_up_programm(self):\r\n        self.setWindowState(self.windowState() | Qt.WindowMinimized)\r\n\r\n\r\ndef except_hook(cls, exception, traceback):\r\n    sys.__excepthook__(cls, exception, traceback)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QtWidgets.QApplication(sys.argv)\r\n\r\n    programm = MainWindow()\r\n    programm.show()\r\n    sys.excepthook = except_hook\r\n    sys.exit(app.exec())\r\n","repo_name":"KapKapkin/music_prjct","sub_path":"musicproject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2642260878","text":"import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings(action=\"once\")\n\ndef main():\n    # Import Data\n    df = pd.read_csv(\"./data/mpg_ggplot2.csv\")\n    df_select = df.loc[df.cyl.isin([4, 8]), :]\n\n    # Plot\n    sns.set_style(\"white\")\n    gridobj = sns.lmplot(x=\"displ\", y=\"hwy\", hue=\"cyl\", data=df_select,\n                         height=7, aspect=1.6, robust=True, palette='tab10',\n                         scatter_kws=dict(s=60, linewidths=.7, edgecolors='black'))\n\n    # Decorations\n    gridobj.set(xlim=(0.5, 7.5), ylim=(0, 50))\n    plt.title(\"Scatterplot with line of best fit grouped by number of cylinders\", fontsize=20)\n    plt.savefig('./jpg/line_regression.jpg')\n    plt.show()\n\n\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"skygx/visualable","sub_path":"line_regession_v1.py","file_name":"line_regession_v1.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"31128064384","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n        self.pre = None\n\nclass DoubleLinkList:\n    def __init__(self):\n        self.head = None\n\n    def printList_Forward_direction(self): #Traversal Example\n        if self.head is None:\n            print(\"LinkedList is empty\")\n        else:\n            temp=self.head\n            while temp is not None:\n                print(temp.data,\"-->\",end=\" \")\n                temp = temp.next\n\n    def printList_backward_direction(self): #Reverse\n        if self.head is None:\n            print(\"LinkList is Empty\")\n        else:\n            temp=self.head\n            while temp.next is not None: #walk to the last node\n                temp=temp.next\n            while temp is not None:\n                print(temp.data,\"-->\",end=\" \")\n                temp=temp.pre\n\n    def insert_LinkList_Empty(self,data):\n        if self.head is None:\n            new_node=Node(data)\n            self.head=new_node\n        else:\n            print(\"LinkList is Not Empty!\")\n\n    def insert_Begning(self,data):\n        new_Node=Node(data)\n        if self.head is None:\n            self.head=new_Node # the new node becomes the head (the assignment was reversed before)\n        else:\n            new_Node.next=self.head\n            self.head.pre=new_Node\n            self.head=new_Node\n\n    def insert_End(self,data):\n        new_node=Node(data)\n        if self.head is None:\n            self.head=new_node\n        else:\n            temp=self.head\n            while temp.next is not None:\n                temp=temp.next\n            temp.next=new_node\n            new_node.pre = temp\n\n    def add_after(self,data,x):\n        if self.head is None:\n            print(\"Empty LinkList\")\n        else:\n            temp=self.head\n            while temp is not None:\n                if x==temp.data:\n                    break\n                else:\n                    temp=temp.next\n            if temp is None: # x was not found anywhere in the list\n                print(\"x is not found in LinkList!!\")\n            else:\n                new_node=Node(data)\n                new_node.next=temp.next\n                new_node.pre=temp\n                if temp.next is not None:\n                    temp.next.pre=new_node\n                temp.next=new_node\n\n    def add_Before(self,data,x):\n        if self.head is None:\n            print(\"Empty LinkList\")\n        else:\n            temp=self.head\n            while temp is not None:\n                if x==temp.data:\n                    break\n                else:\n                    temp=temp.next\n            if temp is None: # x was not found anywhere in the list\n                print(\"x is not found in LinkList!!\")\n            else:\n                new_node=Node(data)\n                new_node.next=temp\n                new_node.pre=temp.pre\n                if temp.pre is not None:\n                    temp.pre.next=new_node\n                else:\n                    self.head=new_node # inserting before the current head\n                temp.pre=new_node\n\n\ndll=DoubleLinkList()\n\ndll.insert_End(30)\ndll.insert_LinkList_Empty(10)\ndll.insert_Begning(20)\nprint(dll.printList_Forward_direction())","repo_name":"harshitsingh20/PythonNotes-All-Program-","sub_path":"226 DSA DoubleLinkList(Inseartion).py","file_name":"226 DSA DoubleLinkList(Inseartion).py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"6403174266","text":"import os\n\nfrom flask import Flask, render_template, request, flash, jsonify\nfrom flask_login import LoginManager, current_user\n\nfrom app.models import db, User\nfrom app.auth import auth\n\napp = Flask(__name__)\napp.secret_key = os.urandom(32)\napp.register_blueprint(auth)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./static/data/database.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = 'False'\n\ndb.init_app(app)\nwith app.app_context():\n    db.create_all()\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'auth.login'\nlogin_manager.login_message = 'Log in to view this page.'\nlogin_manager.login_message_category = 'danger'\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.query.get(user_id)\n\n@app.route('/')\ndef root():\n    return render_template('index.html')\n\nif __name__ == \"__main__\":\n    app.debug = True\n    app.run()","repo_name":"Xyresic/FlaskTemplate","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"32425916670","text":"# Write a program to find second biggest number out of three positive numbers, given by user\n\na = int(input(\"Enter the 1st number : \"))\nb = int(input(\"Enter the 2nd number : \"))\nc = int(input(\"Enter the 3rd number : \"))\n\nif (a > b and a < c) or (a > c and a < b):\n    print(\"The 2nd greatest number is 1st number : \", a)\n\nif (b > a and b < c) or (b > c and b < a):\n    print(\"The 2nd greatest number is 2nd number : \", b)\n\nif (c > b and c < a) or (c > a and c < b):\n    print(\"The 2nd greatest number is 3rd number : \", c)\n","repo_name":"Memon-Sabir-au28/python-practise","sub_path":"prc8.py","file_name":"prc8.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34142771591","text":"# -*- coding: utf-8 -*-\n# Flags\n\n# set true if json file has to be created\npre_processing_required = False\n# if noun phrases have to be created then set it to true\nNP_generation_required = False\n\n# Files\ndata_folder = \"data/\"\ndata_csv = data_folder + \"data.csv\"\ndata_json = data_folder + \"data.json\"\ndata_np = data_folder + \"NP.json\"\ncsv_delimiter = \"ā\"\n\n# table\ntable_size = 6 #data size of original data\ntable_time = 0\ntable_problem = 1\ntable_solution = 2\ntable_domain = 3 #eg open/login\ntable_problem_type = 4 #how do I comes here\ntable_cid =
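A quick exercise of the doubly linked list above, assuming the repaired insert_Begning and not-found checks from the record as fixed here (the values are arbitrary):

dll = DoubleLinkList()
dll.insert_LinkList_Empty(10)   # 10
dll.insert_Begning(20)          # 20 <-> 10
dll.insert_End(30)              # 20 <-> 10 <-> 30
dll.add_after(25, 20)           # 20 <-> 25 <-> 10 <-> 30
dll.printList_Forward_direction()
dll.printList_backward_direction()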
5\n\n","repo_name":"alseambusher/calls_analyser","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21944823939","text":"import numpy as np\n\ndef updateScores(grid, i, j, p, max_l=5):\n # Get subgrid centered around where was just played\n sg, r, c, window = extractSubgrid(grid, i, j, max_l)\n # Check all \n p.score += check_row(sg, r, p, max_l)\n p.score += check_col(sg, c, p, max_l)\n p.score += check_hills(sg, r, c, p, max_l)\n p.score += check_dales(sg, r, c, p, max_l)\n writeSubgrid(sg, grid, window) # Write back the updated grid to the main one\n #return grid\n\ndef extractSubgrid(g, i, j, max_l):\n rmax, cmax = g.shape\n rstart, rend = max(0, i-(max_l-1)), min(rmax, i+(max_l))\n cstart, cend = max(0, j-(max_l-1)), min(cmax, j+(max_l))\n rl, cl = i-rstart, j-cstart # Verify the local index mapping\n window = (rstart, rend, cstart, cend)\n assert rend-rstart >= max_l # Check the size of the new\n assert cend-cstart >= max_l\n return g[rstart:rend, cstart:cend], rl, cl, window\n\n\ndef writeSubgrid(sg, g, w):\n # w is (rstart, rend, cstart, cend)\n g[w[0]:w[1], w[2]:w[3]] = sg\n\n\n# Row check\ndef check_row(g, r, p, max_l):\n \"\"\"[MAIN ALGO] If something has to be moded here, probably in the others too.\n Returns nothing, modifies p's score in place if needed\n grid g is also modified in place\n \"\"\"\n points = 0\n _, cmax = g.shape\n s = sum(g[r, :max_l-1]) + g[r, 0] # Double first elem and rm last\n for k in range(cmax-max_l+1): # Iterate on cols\n s += g[r, max_l-1+k] # Add head\n s -= g[r, k] # Remove tail\n if s in p.win:\n #print('Row win on:', k, c, g)\n g[r, k:max_l+k] = p.pc2 # Assigns values\n points += 1\n if k == 0 and cmax == 2*max_l-1: # CORNER CASE: what if you joined a whole line?\n s = sum(g[r, max_l-1:])\n if s in p.win: # Only works if all other symbols are not assigned\n g[r, max_l-1:] = p.pc2 # Assigns values\n #p['score'] += 1\n points += 1\n break\n else: # That is the only point you could score in that line\n break\n return points\n\n\n# Col check\ndef check_col(g, c, p, max_l):\n \"\"\"Returns nothing, modifies p's score in place if needed\n grid g is also modified in place\n \"\"\"\n points = 0\n rmax, _ = g.shape\n s = sum(g[:max_l-1, c]) + g[0, c] # Double first elem and rm last\n for k in range(rmax-max_l+1): # Iterate on rows\n s += g[max_l-1+k, c] # Add head\n s -= g[k, c] # Remove tail\n if s in p.win:\n #print('Col win on:', k, c, g)\n g[k:max_l+k, c] = p.pc2 # Assigns values\n points += 1\n if k == 0 and rmax == 2*max_l-1: # CORNER CASE: what if you joined a whole line?\n s = sum(g[max_l-1:, c])\n if s in p.win: # Only works if all other symbols are not assigned\n g[max_l-1:, c] = p.pc2\n points += 1\n break\n else: # That is the only point you could score in that line\n break\n return points # 0, 1 or 2\n\n\ndef edgeHills(g, r, c):\n rmax, cmax = g.shape\n rmax -= 1\n cmax -= 1\n \n # Coordinates of two extreme points\n kmax = min(r, cmax-c) # max number of steps in that direction\n upper_right = (r-kmax, c+kmax)\n kmax = min(rmax-r, c)\n bottom_left = (r+kmax, c-kmax)\n return upper_right, bottom_left\n\ndef test_edgeHills():\n s = 9\n test = np.zeros((s, s))\n row = {0, s-1}\n col = {0, s-1}\n for i in range(s):\n for j in range(s):\n ur, bl = edgeHills(test, i, j)\n assert ur[0] in row or ur[1] in col\n assert bl[0] in row or bl[1] in col\n assert ur[0]+ur[1] == i+j == bl[0]+bl[1]\n return 
True\ntest_edgeHills()\n\ndef edgeDales(g, r, c):\n rmax, cmax = g.shape\n rmax -= 1\n cmax -= 1\n \n # Coordinates of two extreme points\n kmax = min(rmax-r, cmax-c)\n bottom_right = (r+kmax, c+kmax)\n kmax = min(r, c)\n upper_left = (r-kmax, c-kmax)\n return bottom_right, upper_left\n\ndef test_edgeDales():\n s = 9\n test = np.zeros((s, s))\n row = {0, s-1}\n col = {0, s-1}\n for i in range(s):\n for j in range(s):\n ur, bl = edgeDales(test, i, j)\n assert ur[0] in row or ur[1] in col\n assert bl[0] in row or bl[1] in col\n assert ur[0]-ur[1] == i-j == bl[0]-bl[1]\n return True\ntest_edgeDales()\n\ndef check_hills(g, r, c, p, max_l):\n points = 0\n ur, bl = edgeHills(g, r, c)\n # Isolate what we will be working on\n hill_length = (bl[0] - ur[0]) + 1 # |slope| = 1 so we can use any distance\n coord = [(bl[0]-k, bl[1]+k) for k in range(hill_length)] # All coords to select\n if len(coord) < max_l: return points # No way we can score\n hill = [g[k] for k in coord] # Back to simple 1D format\n s = sum(hill[:max_l-1]) + hill[0] # Double first elem and rm last\n \n for k in range(hill_length - (max_l-1)): # Iterate on distance\n s += hill[max_l-1+k] # Add head\n s -= hill[k] # Remove tail\n if s in p.win:\n # Reassign values to g\n cwin = coord[k:max_l+k]\n #print('Hill win on:', cwin)\n for c_ in cwin:\n g[c_] = p.pc2\n # g[k:max_l+k, c] = p.pc2 # Assigns values\n #p['score'] += 1\n points += 1\n if k == 0 and hill_length == 2*max_l-1: # CORNER CASE: what if you joined a whole line?\n s = sum(hill[max_l-1:])\n if s in p.win: # Only works if all other symbols are not assigned\n cwin = coord[max_l-1:]\n for c_ in cwin:\n g[c_] = p.pc2\n #p['score'] += 1\n points += 1\n break\n else: # That is the only point you could score in that line\n break\n \n return points # 0, 1 or 2\n\ndef check_dales(g, r, c, p, max_l):\n points = 0\n br, ul = edgeDales(g, r, c)\n # Isolate what we will be working on\n dale_length = (br[1] - ul[1]) + 1 # |slope| = 1 so we can use any distance\n coord = [(ul[0]+k, ul[1]+k) for k in range(dale_length)] # All coords to select\n if len(coord) < max_l: return points # No way we can score - abort\n dale = [g[k] for k in coord] # Back to simple 1D format\n s = sum(dale[:max_l-1]) + dale[0] # Double first elem and rm last\n \n for k in range(dale_length - (max_l-1)): # Iterate on distance\n s += dale[max_l-1+k] # Add head\n s -= dale[k] # Remove tail\n if s in p.win:\n # Reassign values to g\n cwin = [coord[max_l-1+k-j] for j in range(max_l)]\n #print('Dale win on:', cwin)\n for c_ in cwin:\n g[c_] = p.pc2\n # g[k:max_l+k, c] = p.pc2 # Assigns values\n #p['score'] += 1\n points += 1\n if k == 0 and dale_length == 2*max_l-1: # CORNER CASE: what if you joined a whole line?\n s = sum(dale[max_l-1:])\n \n if s in p.win: # Only works if all other symbols are not assigned\n cwin = coord[max_l-1:]\n for c_ in cwin:\n g[c_] = p.pc2\n #p['score'] += 1\n points += 1\n break\n else: # That is the only point you could score in that line\n break\n return points\n","repo_name":"AlexBdx/TicTacToe","sub_path":"TicTacToe/boardCheck.py","file_name":"boardCheck.py","file_ext":"py","file_size_in_byte":7484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40744790178","text":"import argparse\nimport pyspark\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col,lit,sum,udf,when\nfrom pyspark.sql.types import StringType,IntegerType\n\ndef convert_date(x):\n if '/' in x:\n ele = x.split('/')\n else:\n ele = 
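check_row in the boardCheck record above keeps an O(1) rolling sum while sliding a length-max_l window along a row. Note that its seed (doubling element 0) combined with subtracting vals[k] after adding the head appears to match the true window sum only on the first step; the standard formulation, shown here with hypothetical values, adds the entering element, checks, then drops the leaving one:

def window_sums(vals, k):
    s = sum(vals[:k - 1])          # first k-1 elements; the head is added inside the loop
    out = []
    for t in range(len(vals) - k + 1):
        s += vals[t + k - 1]       # element entering the window
        out.append(s)              # s == sum(vals[t:t + k])
        s -= vals[t]               # element leaving the window
    return out

assert window_sums([1, 2, 3, 4, 5], 3) == [6, 9, 12]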
x.split('-')\n if len(ele[0]) == 4:\n return ele[0] + '-' + ele[1] + '-' + ele[2]\n return ele[2] + '-' + ele[1] + '-' + ele[0]\n\ndef convert_vnd_to_dollar(x):\n return int(x) * 40\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--exe_date\")\nargs = parser.parse_args()\n\nexe_date = \"\"\nif args.exe_date:\n\texe_date = args.exe_date\n\nrunTime = exe_date.split(\"-\")\nyear = runTime[0]\nmonth = runTime[1]\nday = runTime[2]\n\nspark = SparkSession.builder \\\n .master('local[*]') \\\n .appName(\"Daily Report2\") \\\n .config('hive.metastore.urls','thrift://localhost:9083') \\\n .config('hive.exec.dynamic.partition','true') \\\n .config('hive.exec.dynamic.partition.mode',\"nonstrict\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n\nordersDf = spark.read.parquet(\"hdfs://master:9000/datalake/orders\").drop(\"year\",\"month\",\"day\")\norderDetailDf = spark.read.parquet(\"hdfs://master:9000/datalake/order_detail\").drop(\"year\",\"month\",\"day\")\nproductsDf = spark.read.parquet(\"hdfs://master:9000/datalake/product\").drop(\"year\",\"month\",\"day\",\"created_at\")\ninventoryDf = spark.read.parquet(\"hdfs://master:9000/datalake/inventory\").drop(\"year\",\"month\",\"day\")\n\n#get null df\nnull_df_orders = ordersDf.filter(col(\"quantity\").isNull() | col(\"created_at\").isNull())\n#null_df_ordersDetail = orderDetailDf.filter(col(\"user_id\").isNull() | col(\"total\").isNull())\ndf_null = null_df_orders.join(orderDetailDf, null_df_orders[\"product_id\"] == orderDetailDf[\"order_id\"],'left_outer')\n\n#remove null and convert date\ndateUdf = udf(convert_date,StringType())\nordersDf = ordersDf.na.drop()\nordersDf.filter(col(\"quantity\").isNull() | col(\"created_at\").isNull()).show()\nordersDf = ordersDf.withColumn(\"created_at\",dateUdf(col(\"created_at\")))\n\n#remove null and convert dollar\ndollarUdf = udf(convert_vnd_to_dollar,IntegerType())\n#orderDetailDf = orderDetailDf.na.drop()\norderDetailDf = orderDetailDf.withColumn(\"total\",when(col(\"unit\") == \"VND\",dollarUdf(col(\"total\"))).otherwise(col(\"total\")))\n\n\npreDF = ordersDf.filter(ordersDf[\"created_at\"] == exe_date) \\\n .join(orderDetailDf, ordersDf[\"product_id\"] == orderDetailDf[\"order_id\"],\"inner\") \\\n .join(productsDf,ordersDf[\"product_id\"] == productsDf[\"id\"],\"inner\") \\\n .join(inventoryDf.select(col(\"quantity\"). \\\n alias(\"inv_quantity\"),col(\"id\")),productsDf[\"inventory_id\"] \\\n == inventoryDf[\"id\"],\"inner\")\n\n\nmapDf = preDF.groupBy(\"Make\",\"Model\",\"Category\",\"product_id\",\"inv_quantity\") \\\n .agg(sum(\"quantity\").alias(\"Sales\"),sum(\"total\").alias(\"Revenue\"))\n\nresultDf = mapDf.withColumn(\"LeftOver\",col(\"inv_quantity\") - col(\"Sales\")) \\\n .withColumn(\"year\",lit(year)) \\\n .withColumn(\"month\",lit(month)) \\\n .withColumn(\"day\",lit(day)) \\\n .select(\"Make\",\"Model\",\"Category\",\"Sales\",\"Revenue\",\"LeftOver\",\"year\",\"month\",\"day\").limit(20);\n\nspark.sql(\"CREATE DATABASE IF NOT EXISTS reports\")\n\nresultDf.write \\\n .format(\"hive\") \\\n .partitionBy(\"year\",\"month\",\"day\") \\\n .mode(\"append\") \\\n .saveAsTable(\"reports.daily_gross_revenue\")\n\nprint(\"----------------------------DONE!!-----------------------------------\")\ntblLocation = \"hdfs://master:9000/errors/order_error\"\n\ndf_null.drop(\"id\").write. 
\\\n\tmode(\"append\") \\\n\t.parquet(tblLocation)\n\n\n#resultDf.write \\\n#\t\t\t\t.format(\"hive\") \\\n#\t\t\t\t.partitionBy(\"year\",\"month\",\"day\") \\\n#\t\t\t\t.mode(\"append\") \\\n#\t\t\t\t.parquet(tblLocation)\n","repo_name":"hoangphu7122002/datawarehouse","sub_path":"spark_etl_script/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5380492577","text":"from vec2d import *\nfrom game_tools import *\nfrom transformations import *\n\nclass Mobject:\n def __init__(self, parent, **kwargs):\n self.parent = parent\n self.currentMovements = []\n self.mobjects = []\n self.params.update(kwargs)\n for name, value in self.params.items():\n if type(value) == list or type(value) == tuple:\n value = np.array(value)\n setattr(self, name, value)\n def toPixel(self, coords):\n return self.parent.toPixel(coords)\n def move(self):\n for movement in self.currentMovements:\n movement.step()\n if movement.t >= movement.time:\n self.currentMovements.remove(movement)\n def rotateBy(self, angle, axis, time, delay=0, ratefunc=SmoothMove()):\n self.currentMovements.append(\n Rotation(\n parent = self,\n angle = angle,\n axis = axis,\n time = time,\n delay = delay,\n ratefunc = ratefunc\n )\n )\n def changeColorTo(self, endColor, time, delay=0, ratefunc=ConstantRF()):\n self.currentMovements.append(\n ChangeColor(\n parent = self,\n endColor = endColor,\n time = time,\n delay = delay,\n ratefunc = ratefunc\n )\n )\n\nclass Line(Mobject):\n params = {\n 'start': (0, 0),\n 'end': (10, 10),\n 'color': white,\n 'width': 0.001,\n 'draw_start_arrow': False,\n 'draw_end_arrow': False,\n 'arrowWidth': 0.1,\n 'arrowLength': 0.15\n }\n\n def __init__(self, parent, **kwargs):\n Mobject.__init__(self, parent, **kwargs)\n self.update_vectors()\n self.end_arrow_base = self.end - self.arrowLength * self.unit_vector # need to restructure this a bit\n # self.create_arrows() #need to include 'Triangle' class first\n self.update_vertices()\n\n def update_vectors(self):\n self.vector = r_vec(self.start, self.end)\n self.unit_vector = norm(r_vec(self.start, self.end))\n self.normal_vector = rotate(self.unit_vector, np.pi / 2)\n\n def update_vertices(self):\n self.update_vectors()\n self.end_arrow_base = self.end - self.arrowLength * self.unit_vector\n\n p1 = self.end_arrow_base + self.width / 2 * self.normal_vector\n p2 = self.end_arrow_base - self.width / 2 * self.normal_vector\n p3 = self.start - self.width / 2 * self.normal_vector\n p4 = self.start + self.width / 2 * self.normal_vector\n\n self.vertices = np.array([p1, p2, p3, p4])\n self.midpoint = self.get_midpoint()\n if self.draw_start_arrow:\n self.start_arrow.vertices = self.get_start_arrow_vertices()\n if self.draw_end_arrow:\n self.end_arrow.vertices = self.get_end_arrow_vertices()\n\n def get_midpoint(self):\n return self.start + (self.end - self.start) / 2\n\n def get_start_arrow_vertices(self):\n return np.array([self.start_arrow_base + self.normal_vector * self.arrowWidth,\n self.start_arrow_base - self.normal_vector * self.arrowWidth,\n self.start_arrow_base + self.unit_vector * self.arrowLength])\n\n def get_end_arrow_vertices(self):\n return np.array([self.end_arrow_base + self.normal_vector * self.arrowWidth,\n self.end_arrow_base - self.normal_vector * self.arrowWidth,\n self.end_arrow_base + self.unit_vector * self.arrowLength])\n\n def create_arrows(self):\n if self.draw_start_arrow:\n self.start = self.start + 
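convert_date in the ETL record above normalizes day-first 'dd/mm/yyyy' and 'dd-mm-yyyy' strings and passes already-ISO dates through before being wrapped in a Spark UDF. Because the UDF wraps a plain function, it can be checked without a Spark session:

assert convert_date('2021-06-01') == '2021-06-01'   # already ISO: len(ele[0]) == 4
assert convert_date('01/06/2021') == '2021-06-01'   # slash-separated, day first
assert convert_date('01-06-2021') == '2021-06-01'   # dash-separated, day first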
self.unit_vector * self.arrowLength\n self.start_arrow = Triangle(params={'vertices': self.get_start_arrow_vertices(), 'color': self.color})\n if self.draw_end_arrow:\n self.end_arrow = Triangle(params={'vertices': self.get_end_arrow_vertices(), 'color': self.color})\n\n def move(self):\n self.pointlist = np.array([self.start, self.end])\n Mobject.move(self)\n self.start, self.end = self.pointlist\n\n self.update_vertices()\n\n def draw(self, display):\n color = self.color.astype(int)\n\n if self.draw_start_arrow:\n self.start_arrow.color = color\n if self.draw_end_arrow:\n self.end_arrow.color = color\n\n # converted_vertices = list(map(lambda x: parent.toPixel().astype(int), self.vertices))\n\n convertedPoints = []\n for vertex in self.vertices:\n convertedPoints.append(\n self.parent.toPixel(vertex).astype(int)) # make it so 'toPixel' can handle arrays of coordinates\n\n pygame.gfxdraw.aapolygon(display, convertedPoints, color)\n pygame.gfxdraw.filled_polygon(display, convertedPoints, color)\n\n # if self.draw_start_arrow:\n\n # self.start_arrow.draw(graph)\n # if self.draw_end_arrow:\n # self.end_arrow.draw(graph)\n\n\nclass Circle(Mobject):\n params = {\n 'center': (0, 0),\n 'radius': 1,\n 'color': white,\n 'width': 1, # use width=0 to fill shape\n 'resolution': 100 # number of points\n }\n\n def __init__(self, parent, **kwargs):\n Mobject.__init__(self, parent, **kwargs)\n self.update_points()\n if self.width == 0:\n self.filled = True\n else:\n self.filled = False\n\n def update_points(self):\n angles = np.linspace(0, 2 * np.pi, self.resolution)\n self.points = self.radius * np.transpose(np.array([np.cos(angles), np.sin(angles)]))\n\n def draw(self, display): # needs work\n points = []\n for point in self.points:\n points.append(self.parent.toPixel(point).astype(int))\n if self.filled:\n gfxdraw.filled_polygon(display, points, self.color)\n else:\n gfxdraw.aapolygon(display, points, self.color)","repo_name":"gkowash/Animation-engine-old","sub_path":"mobjects.py","file_name":"mobjects.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8567688053","text":"import boto3\r\nimport datetime\r\n\r\ntry:\r\n from .config import boto3_config\r\nexcept ImportError as e:\r\n boto3_config = {}\r\n\r\n\r\nclass LamdaMetrics:\r\n def __init__(self, starttime: datetime, endtime: datetime, target: str = \"\"):\r\n if starttime >= endtime:\r\n raise Exception(\r\n f\"starttime {str(starttime)} needs to be less than endtime {str(endtime)}\"\r\n )\r\n\r\n self.client = boto3.client(\"cloudwatch\", **boto3_config)\r\n self.target = target\r\n self.metric_stats = {\r\n \"Namespace\": \"AWS/Lambda\",\r\n \"StartTime\": starttime,\r\n \"EndTime\": endtime,\r\n \"Period\": 60,\r\n \"Dimensions\": [\r\n {\"Name\": \"FunctionName\", \"Value\": self.target},\r\n ],\r\n }\r\n\r\n def __str__(self):\r\n return f\"Metrics for {self.target}\"\r\n\r\n @property\r\n def duration(self):\r\n return {\r\n \"MetricName\": \"Duration\",\r\n \"Statistics\": [\"Average\"],\r\n \"Unit\": \"Milliseconds\",\r\n }\r\n\r\n @property\r\n def invocations(self):\r\n return {\"MetricName\": \"Invocations\", \"Statistics\": [\"Sum\"], \"Unit\": \"Count\"}\r\n\r\n @property\r\n def errors(self):\r\n return {\"MetricName\": \"errors\", \"Statistics\": [\"Sum\"], \"Unit\": \"Count\"}\r\n\r\n def _set_metric_stats(self, metric: str):\r\n if metric not in self.__dir__():\r\n raise Exception(f\"{metric} Not a defined metric.\")\r\n 
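The Line mobject above builds its quad from a unit direction vector and that vector rotated 90 degrees (its helpers r_vec, norm and rotate come from the vec2d module, which is not shown here). The same geometry in plain numpy, as a sketch:

import numpy as np

def rotate(v, theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([c * v[0] - s * v[1], s * v[0] + c * v[1]])

start, end = np.array([0.0, 0.0]), np.array([3.0, 4.0])
unit = (end - start) / np.linalg.norm(end - start)
normal = rotate(unit, np.pi / 2)            # 90-degree counter-clockwise rotation
assert abs(np.dot(unit, normal)) < 1e-12    # perpendicular, as the quad construction needs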
self.metric_stats.update(getattr(self, metric))\r\n\r\n def get_metric_statistics(self, metric: str):\r\n self._set_metric_stats(metric)\r\n return self.client.get_metric_statistics(**self.metric_stats)\r\n","repo_name":"afontana1/Data-Engineering","sub_path":"Algorithms & Data Structures/aws/cloudwatch/lambda_metrics.py","file_name":"lambda_metrics.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2229049983","text":"#! /usr/bin/python\nimport os\nimport numpy as np\nimport cv2\nimport subprocess\nimport rospy, ros_numpy, rospkg\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\n\n\n\nclass CameraNode:\n def __init__(self, width=1280, height=720):\n self.width = width\n self.height = height\n\n # camera parameter setting\n cmd = list()\n video_id = \"0\"\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c brightness=0')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c contrast=32')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c saturation=55')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c hue=0')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c white_balance_temperature_auto=0') # manual mode\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c gamma=100')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c gain=0')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c power_line_frequency=1')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c white_balance_temperature=0')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c sharpness=2')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c backlight_compensation=1')\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c exposure_auto=1') # manual mode\n cmd.append('v4l2-ctl -d /dev/video' + video_id + ' -c exposure_absolute=179')\n \n for cmd_i in cmd: \n subprocess.check_output(cmd_i, shell=True)\n\n \n\n def publish_loop(self):\n # load calibration data\n rospack = rospkg.RosPack()\n path = rospack.get_path('ros_camera') + \"/config/calib_opencv/\"\n camera_mat = np.loadtxt(path+'K.csv', delimiter=',')\n dist_coef = np.loadtxt(path+'d.csv', delimiter=',')\n \n # instance of vedeo capture \n cap = cv2.VideoCapture(-1)\n cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width) \n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height) \n\n # ros setting \n rospy.init_node(\"image_raw_node\", disable_signals=True)\n pub_cam = rospy.Publisher(\"stream\", Image, queue_size=1)\n r = rospy.Rate(30)\n bridge = CvBridge()\n img = Image()\n\n # run publish loop\n while not rospy.is_shutdown():\n ret, frame = cap.read()\n undistort_image = frame\n undistort_image = cv2.undistort(frame, camera_mat, dist_coef)\n\n # resize\n # frame = self.resize(frame)\n undistort_image = self.resize(undistort_image, width_height=(64, 64))\n # undistort_image = self.resize(undistort_image)\n # # add center line\n # frame = self.add_center_line(frame)\n # undistort_image = self.add_center_line(undistort_image)\n\n # print(cap.get(cv2.CAP_PROP_AUTO_EXPOSURE))\n\n # pub_cam.publish(bridge.cv2_to_imgmsg(undistort_image, encoding=\"bgr8\"))\n pub_cam.publish(ros_numpy.msgify(Image, undistort_image, encoding='bgr8'))\n \n \n r.sleep()\n\n cap.release()\n cv2.destroyAllWindows()\n\n\n def resize(self, img, width_height=(128, 128)):\n h = img.shape[1]\n w = img.shape[0]\n hc = int(h*0.5)\n\n wc = int(w*0.5)\n img = img[:, hc-wc:hc+wc]\n\n 
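LamdaMetrics above assembles a CloudWatch get_metric_statistics call by merging a per-metric property into the shared period/dimension dict. A hedged usage sketch (the Lambda function name is made up, and a real call needs AWS credentials); note that the AWS/Lambda namespace spells its error metric 'Errors', so the record's lowercase 'errors' property would likely return no datapoints:

import datetime

end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=1)

m = LamdaMetrics(start, end, target="my-function")   # hypothetical Lambda name
stats = m.get_metric_statistics("invocations")       # merges in the 'invocations' property
print(stats["Datapoints"])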
img = cv2.resize(img , width_height)\n return img\n\n\n def add_center_line(self, img): \n img[int(img.shape[0]/2.0 + 0.5)] = [255,191,0]\n img[:, int(img.shape[1]/2.0 + 0.5)] = [255,191,0]\n return img\n\n\nif __name__ == \"__main__\":\n\n cs = CameraNode()\n cs.publish_loop()\n","repo_name":"tomoya-yamanokuchi/docker-ros-connection","sub_path":"catkin_ws/src/ros_camera/src/debug_camera_node.py","file_name":"debug_camera_node.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20359843748","text":"import numpy as np\n\n\nclass DataReader:\n knownBlockedCellValue = 0\n paddingValue = 0\n xTrain = None\n yTrain = None\n xTest = None\n yTest = None\n isPadded = False\n changeBlockedCellValue = False\n\n def __init__(self, knownBlockedCellValue, paddingValue, isPadded,\n changeBlockedCellValue):\n self.knownBlockedCellValue = knownBlockedCellValue\n self.paddingValue = paddingValue\n self.isPadded = isPadded\n self.changeBlockedCellValue = changeBlockedCellValue\n\n def getXTrainMat(self, pathToFile):\n self.xTrain = np.load(pathToFile)\n\n if self.changeBlockedCellValue:\n self.xTrain[self.xTrain == -1] = self.knownBlockedCellValue\n\n if self.isPadded:\n self.xTrain = np.pad(self.xTrain, ((0, 0), (1, 1), (1, 1)),\n 'constant',\n constant_values=(self.paddingValue))\n return self.xTrain\n\n def getYTrainMat(self, pathToFile):\n self.yTrain = np.load(pathToFile)\n return self.yTrain\n\n def getXTestMat(self, pathToFile):\n self.xTest = np.load(pathToFile)\n\n if self.changeBlockedCellValue:\n self.xTest[self.xTest == -1] = self.knownBlockedCellValue\n\n if self.isPadded:\n self.xTest = np.pad(self.xTest, ((0, 0), (1, 1), (1, 1)),\n 'constant',\n constant_values=(self.paddingValue))\n return self.xTest\n\n def getYTestMat(self, pathToFile):\n self.yTest = np.load(pathToFile)\n return self.yTest\n\n\nif __name__ == '__main__':\n dataReaderObj = DataReader(-100, -100, True, True)\n print(dataReaderObj.getXTestMat('xTest.npy'))\n print(dataReaderObj.getYTestMat('yTest.npy'))","repo_name":"PietropaoloV/GridworldML520","sub_path":"Simulator/DataReader.py","file_name":"DataReader.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72272920053","text":"#coding: utf-8\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom biblioteca.views import PaginaInicio, ListaEditores, DetallesEditor\nfrom misitio.views import MiVista\nfrom django.views.generic.base import TemplateView\nfrom biblioteca.forms import CrearAutor, ActualizarAutor, BorrarAutor\n \nurlpatterns = [ \n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^hola/$', 'misitio.views.hola'), \n url(r'^fecha/$', 'misitio.views.fecha_actual'), \n url(r'^fecha/mas/(\\d{1,2})/$', 'misitio.views.horas_adelante'), \n url(r'^navegador/', 'misitio.views.mostrar_navegador'),\n url(r'^atributos_meta/', 'misitio.views.atributos_meta'),\n url(r'^atributos_meta_template/', 'misitio.views.atributos_meta_template'),\n url(r'^formulario_buscar/$', 'biblioteca.views.formulario_buscar'),\n url(r'^buscar/$', 'biblioteca.views.buscar'),\n url(r'^contactos/$', 'contactos.views.contactos'),\n url(r'^ws_temps/$', 'misitio.views.ws_temps'),\n url(r'^ws_rss/$', 'misitio.views.ws_rss'),\n url(r'^hola/$', MiVista.as_view(), name='mi_vista'),\n url(r'^benvinguda/$', 
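The DataReader record above pads every grid in a batch with a one-cell border while leaving the batch axis alone; each tuple in np.pad's pad_width is (before, after) for one axis. A small shape check:

import numpy as np

batch = np.zeros((2, 3, 3))                              # (batch, rows, cols)
padded = np.pad(batch, ((0, 0), (1, 1), (1, 1)),
                'constant', constant_values=(-100))      # -100 mirrors the paddingValue above
assert padded.shape == (2, 5, 5)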
PaginaInicio.as_view(), name='benvinguts'),\n    url(r'^acerca/', TemplateView.as_view(template_name=\"acerca_de.html\")),\n    url(r'^editores/$', ListaEditores.as_view(template_name=\"editor_list.html\"), name='lista_editores'),\n    url(r'^detalles/editor/(?P<pk>[0-9]+)/$', DetallesEditor.as_view(template_name=\"editor_detail.html\"), name='detalles_editor'),\n    url(r'^autor/agregar/$', CrearAutor.as_view(template_name='autor_form.html'), name='agregar_autor'), \n    url(r'^autor/(?P<pk>[0-9]+)/$', ActualizarAutor.as_view(template_name='autor_form.html'), name='actualizar_datos'), \n    url(r'^autor/(?P<pk>[0-9]+)/borrar/$', BorrarAutor.as_view(template_name='autor_confirm_delete.html'), name='borrar_autor'),\n    url(r'^problematicos/$', 'biblioteca.views.pasajeros_problematicos_csv'),\n\n]\n","repo_name":"alvarporcar/exemples_django_2015","sub_path":"misitio/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17538932290","text":"#!/usr/local/bin/python3\n\n# TASKS:\n\n# 1. set up the html and identify the variable names\n## We are using \"state\" and \"numDistricts\"\n# 2. set up credentials: create a python script to be used later\n## This is one of the differences between the way we are using Python with SQLite on i6 vs. MySQL.\n# 3. imports (cgi, etc.)\n\n# We agreed in class that steps #4, #5 and #6 could happen in any order:\n# 4. connect to the database\n# 5. build a query\n# 6. set up the web page for output display\n\n# These are the steps to complete the project:\n# 7. execute the query\n# 8. display the query results (if any) to the webpage \n## Include a title area with the name of the state and number of districts selected.\n# 9. close the connection \n\n# imports:\nimport cgi, cgitb\ncgitb.enable()\n\nimport pymysql\nimport os\nfrom credentials import *\n\n\n# Next we will retrieve the values that we need from the HTML form.\n# This is the same as when we retrieved values from an HTML form for the Python/SQLite\n# classwork and assignment #5.\n\nform = cgi.FieldStorage()\nstate = form[\"state\"].value\nnum = form[\"numDistricts\"].value\n\nquery = '''SELECT sd_name,sd_pop_2010 FROM sd\n           WHERE sd_state=\"{}\"\n           ORDER BY sd_pop_2010 DESC\n           LIMIT {}'''.format(state,num)\n\n# set up the connection \n# c is for our cursor and r is for our rows\nconnection = pymysql.connect(host,user,passwd,db,charset=\"utf8mb4\",cursorclass=pymysql.cursors.DictCursor)\nc = connection.cursor()\nc.execute(query)\n\nr = c.fetchall()\n\n# for testing purposes:\n# Note that in the first class, we commented out the lines in which we assign the HTML data to the python variables.\n# If we do that and then hard-code assignments to the state and num variables, we can test the python program\n# by running \"python3 ... .py\" at the command line.\n# print(query)\n\n# set up the web page\nprint(\"Content-type: text/html;charset=utf-8\")\nprint(\"\\\\n\\\\n\")\n\nprint('''\n<html>\n<head>\n    <title>LAMP Exercise: Python and MySQL - Fall 2017</title>\n</head>\n<body>\n    <div id=\"header\">\n        <h1>School Districts - LAMP programming exercise using Python and MySQL</h1>\n    </div>\n    <div id=\"content\">\n        <p>Hello, World!</p>\n    </div>\n''')\n\nprint(\"<p>Query: \"+query+\"</p>\")\nprint(\"<hr>\")\nprint(\"<p>Here are the results:</p>\")\n\n# Add to this print statement to include additional fields and display formatting:\nfor row in r:\n\tprint('''<p>{}</p>'''.format(row['sd_name']))\n\nprint('''\n</body>\n</html>\n''')\n# close the connection\nc.close()\n","repo_name":"simonseo/database-class","sub_path":"HW7_lamp/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"19700317611","text":"import random\nprint(random.sample(['a','b','c'],1))\n\nimport random\ndef randDif(k,n):\n    if k>n:\n        return []\n    a = list(range(1,n+1))\n    random.shuffle(a)\n    return a[:k]\nprint(randDif(1,5))\n\nimport random\ntotal = 4\nli = [i for i in range(total)]\nres = []\nnum = 1\nfor i in range(num):\n    t = random.randint(i,total-1)\n    res.append(li[t])\n    li[t], li[i] = li[i], li[t]\nprint(res)\n# in fact Python already implements a method for this:","repo_name":"xiaoxiexiezi/selenium-python3","sub_path":"buchongfu.py","file_name":"buchongfu.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2851387036","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# @Time : 2019/8/31 14:29\n# @Author : su\n# @File : MultipleThread5.py\n\"\"\"\nPython multithreading: thread synchronization, with and without a lock (i.e. MultipleThread2.py and MultipleThread3.py).\nTo run without the lock, comment out the following two lines:\n    threadLock.acquire()\n    threadLock.release()\n\"\"\"\nimport threading\nimport time\n\n\naList = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n\nclass MyThread(threading.Thread):  # MyThread inherits from threading.Thread\n    def __init__(self, thread_id, name, counter):\n        threading.Thread.__init__(self)\n        self.thread_id = thread_id\n        self.name = name\n        self.counter = counter\n\n    def run(self):\n        print('Starting thread:', self.name)\n        threadLock.acquire()\n        print(self.name, self.counter)\n        threadLock.release()\n\n    def __del__(self):\n        print(self.name, 'thread finished!')\n\n\ndef print_time(thread_name, delay, counter):\n    while counter:\n        time.sleep(delay)\n        aList[counter - 1] += 1\n        print(\"[%s] %s modified value #%d, the new value is: %d\" % (time.ctime(time.time()), thread_name, counter, aList[counter - 1]))\n        counter -= 1\n\n\nthreadLock = threading.Lock()\nthreads = []\n\n# create new threads\nthread1 = MyThread(1, 'Thread-1', 1)\nthread2 = MyThread(2, 'Thread-2', 2)\n\n# start the threads\nthread1.start()\nthread2.start()\n\n# add the threads to the thread list\nthreads.append(thread1)\nthreads.append(thread2)\n\n# wait for all threads to finish\nfor t in threads:\n    t.join()\nprint(\"Exiting main thread\")\n","repo_name":"suboice114/FirstPythonDemo","sub_path":"AdvancedTutorial/MultipleThread5.py","file_name":"MultipleThread5.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34016008124","text":"from Core.MyObjects import *\nfrom Core import Universals as uni\nfrom Core import Dialogs\nimport time\n\n\nclass MyThread(MThread):\n    def __init__(self, action, callback=None, args=[], kwargs={}):\n        MThread.__init__(self,
getActiveWindow())\n self.isFinished = False\n self.tarFile = _tarFile\n self.maxMembers = _maxMembers\n self.dlgState = Dialogs.MyStateObject(_title, False, None, False)\n\n def run(self):\n while self.isFinished is False:\n self.dlgState.emit(SIGNAL(\"setState\"), len(self.tarFile.members), self.maxMembers)\n time.sleep(0.05)\n\n def finish(self, _returnValue=None):\n self.isFinished = True\n\n\nclass MyWaitThread(MThread):\n def __init__(self, _title):\n MThread.__init__(self, getActiveWindow())\n self.isFinished = False\n self.dlgState = Dialogs.MyStateObject(_title, False, None, False)\n\n def run(self):\n i = 0\n while self.isFinished is False:\n if i > 9:\n i = 0\n self.dlgState.emit(SIGNAL(\"setState\"), i, 10)\n time.sleep(0.05)\n i += 1\n self.dlgState.emit(SIGNAL(\"setState\"), 10, 10)\n\n def finish(self, _returnValue=None):\n self.isFinished = True\n","repo_name":"supermurat/hamsi-manager","sub_path":"Core/MyThread.py","file_name":"MyThread.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"23317176037","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n# from IPython.core.interactiveshell import InteractiveShell\r\n# InteractiveShell.ast_node_interactivity = \"all\"\r\n\r\nimport matplotlib.pyplot as plt\r\n# %matplotlib inline\r\nimport seaborn as sns\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import train_test_split, cross_validate, GridSearchCV\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn import linear_model\r\nimport miceforest as mf\r\n\r\n\r\nfrom sklearn.decomposition import PCA\r\n\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nimport sklearn.svm as svm\r\nimport xgboost as xgb\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport sklearn.naive_bayes as n_bayes\r\n\r\nfrom skopt import BayesSearchCV\r\nfrom skopt.plots import plot_objective, plot_histogram\r\n\r\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, KFold\r\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\r\nfrom sklearn.ensemble import StackingClassifier\r\n\r\n\r\ntrain = pd.read_csv('Training_Data.csv', header=0, index_col=0)\r\ntest = pd.read_csv('Test_Data.csv', header=0, index_col=0)\r\n\r\n# view\r\ntrain.head().append(train.tail())\r\ndata_describe = train.describe()\r\ntrain.info()\r\n# view missing and outlier\r\ntrain.isnull().sum() # 'clock_speed' -> 1336; \"mobile_wt' -> 30\r\ntest.isnull().sum() # full data\r\n\r\nbinary_cols = ['blue', 'dual_sim', 'four_g', 'three_g', 'touch_screen', 'wifi']\r\nnum_cols = ['battery_power', 'clock_speed', 'fc', 'int_memory', 'mobile_wt', 'm_dep', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time']\r\n\r\ncols = binary_cols + num_cols\r\n\r\ntmp = pd.DataFrame()\r\ntmp['count'] = train[cols].count().values\r\ntmp['missing_rate'] = (train.shape[0] - tmp['count']) / train.shape[0]\r\ntmp['nunique'] = train[cols].nunique().values\r\ntmp.index = cols\r\n\r\n# sns.violinplot(train[num_cols])\r\n\r\n# distribution\r\n# sns.distplot(train.price_range)\r\n\r\n# numerical feature analysis\r\ntmp = pd.DataFrame(index=num_cols)\r\nfor col in num_cols:\r\n tmp.loc[col,'train_Skewness'] = train[col].skew()\r\n tmp.loc[col,'test_Skewness'] = test[col].skew()\r\n tmp.loc[col, 'train_Kurtosis'] = train[col].kurt()\r\n tmp.loc[col, 'test_Kurtosis'] = test[col].kurt()\r\n\r\n# the correlation between features and response\r\n# correlation 
= train[num_cols + ['price_range']].corr()\r\n# correlation['price_range'].sort_values()\r\n\r\n# Visualize the relationship between features\r\n# sns.pairplot(train[num_cols + ['price_range']])\r\n\r\n\r\n# Complete missing values ['clock_speed', 'mobile_wt']\r\n\r\n# GLM method\r\n# col_no_cs_mwt = cols\r\n# col_no_cs_mwt.remove('clock_speed')\r\n# col_no_cs_mwt.remove('mobile_wt')\r\n#\r\n# not_null_cs = train.loc[np.logical_not(train['clock_speed'].isnull()), col_no_cs_mwt]\r\n# null_cs = train.loc[train['clock_speed'].isnull(), col_no_cs_mwt]\r\n# not_null_cs_y = train.loc[np.logical_not(train['clock_speed'].isnull()), 'clock_speed']\r\n#\r\n# not_null_mwt = train.loc[np.logical_not(train['mobile_wt'].isnull()), col_no_cs_mwt]\r\n# null_mwt = train.loc[train['mobile_wt'].isnull(), col_no_cs_mwt]\r\n# not_null_mwt_y = train.loc[np.logical_not(train['mobile_wt'].isnull()), 'mobile_wt']\r\n#\r\n# poly_reg = PolynomialFeatures(degree=3)\r\n# lin_reg = linear_model.LinearRegression()\r\n# not_null_cs_ploy = poly_reg.fit_transform(not_null_cs)\r\n# lin_reg.fit(not_null_cs_ploy,not_null_cs_y)\r\n# predict_cs = lin_reg.predict(poly_reg.fit_transform(null_cs))\r\n# train.loc[train['clock_speed'].isnull(), 'clock_speed'] = predict_cs\r\n\r\n# mice forest method\r\nkernel = mf.MultipleImputedKernel(data=train, save_all_iterations=True, random_state=2021)\r\nkernel.mice(3,verbose=True)\r\nnew_train = kernel.impute_new_data(train)\r\nnew_train = new_train.complete_data(0)\r\n\r\n# Feature Engineering\r\ny = new_train.price_range\r\nnew_train.drop('price_range',axis=1,inplace=True)\r\nx = new_train\r\n\r\n# pca = PCA(n_components=10)\r\n# X = pca.fit_transform(x)\r\n\r\n\r\n\r\n# opt = BayesSearchCV(\r\n# svm.SVC(),\r\n# {\r\n# 'C': (1e-6, 1e+6, 'log-uniform'),\r\n# 'gamma': (1e-6, 1e+1, 'log-uniform'),\r\n# 'degree': (1,8),\r\n# 'kernel': (['linear', 'poly', 'rbf']),\r\n# },\r\n# n_iter=32,\r\n# random_state=0,\r\n# cv=5,\r\n# )\r\n#\r\n# opt.fit(x, y)\r\n#\r\n# print(\"val. 
score: %s\" % opt.best_score_)\r\n# print(\"best params: %s\" % str(opt.best_params_))\r\n#\r\n# _ = plot_objective(opt.optimizer_results_[0],\r\n# dimensions=[\"C\", \"degree\", 'ceof0', \"gamma\", \"tol\"],\r\n# n_minimum_search=int(1e8))\r\n# plt.show()\r\n\r\n\r\n\r\n\r\nclf1 = svm.SVC(\r\n kernel='poly',\r\n degree=1,\r\n gamma=1.1414652329239456,\r\n C=0.0001,\r\n )\r\nclf2 = xgb.XGBClassifier(\r\n objective='binary:logistic',\r\n scale_pos_weight=1,\r\n gamma=0.1,\r\n learning_rate=0.0688,\r\n max_depth=10,\r\n min_child_weight=1,\r\n subsample=0.9,\r\n n_estimators=5000\r\n )\r\nclf3 = KNeighborsClassifier(\r\n algorithm='auto',\r\n n_neighbors=10,\r\n p=1,\r\n weights='distance')\r\nclf4 = n_bayes.GaussianNB(var_smoothing=2.002678394914125e-09)\r\n\r\nmodels = [('SVC',clf1),('xgb',clf2),('KNC',clf3),('NBC',clf4)]\r\n\r\nstacking = StackingClassifier(\r\n estimators=models,\r\n final_estimator=LogisticRegression()\r\n)\r\nstacking.fit(x,y)\r\nresult=pd.DataFrame({'id':test.index, 'price_range':stacking.predict(test)})\r\nresult.to_csv(\"submission_edition1.csv\",index=False)\r\n","repo_name":"JuwuPu/Kaggle","sub_path":"Tabular Playground Series - Sep 2021/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39589591284","text":"from django.contrib import admin\n\nfrom apps.overrides.admin import AutofillCreatorModelAdmin, linkify\nfrom apps.season_04.models import StampChallengeBTBMatch, StampChampEarner\n\n\n@admin.register(StampChallengeBTBMatch)\nclass StampChallengeBTBMatchAdmin(AutofillCreatorModelAdmin):\n list_display = (\n \"match_id\",\n \"challenge\",\n \"match_link\",\n \"created_at\",\n )\n list_filter = (\"challenge\", \"creator\")\n fields = (\n \"challenge\",\n \"match_id\",\n \"creator\",\n )\n\n\n@admin.register(StampChampEarner)\nclass StampChampEarnerAdmin(AutofillCreatorModelAdmin):\n autocomplete_fields = [\"earner\"]\n list_display = (\n \"__str__\",\n linkify(\"earner\"),\n \"earned_at\",\n \"stamp_count\",\n \"creator\",\n )\n list_filter = (\"earner\", \"earned_at\", \"stamp_count\", \"creator\")\n fields = (\n \"earner\",\n \"earned_at\",\n \"stamp_count\",\n \"creator\",\n )\n","repo_name":"HaloFunTime/hft-backend","sub_path":"apps/season_04/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"72773943414","text":"import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBOT_DIR = os.path.join(BASE_DIR,'bot')\nASSETS_DIR = os.path.join(BOT_DIR,'assets')\n\nCHROMIUM_SYSTEM_STRINGS = {\n 'linux':{\n 'zip_type':'linux64',\n 'driver':'chromedriver'\n },\n 'linux2':{\n 'zip_type':'linux64',\n 'driver':'chromedriver'\n },\n 'darwin':{\n 'zip_type':'mac64',\n 'driver':'chromedriver'\n },\n 'win32':{\n 'zip_type':'win32',\n 'driver':'chromedriver.exe'\n }\n}","repo_name":"JangasCodingplace/snippet_basic_selenium_setup","sub_path":"bot/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31920853118","text":"#!/usr/bin/env python3\n\n# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport 
requests\n\n\nurl = 'https://ktor-chat-app.herokuapp.com/chat'\n\n\ndef get_messages():\n response = requests.get(url)\n print(response.status_code)\n print(response.text)\n\n\ndef create_messages(user_name: str, text: str):\n response = requests.post(url, json={'user_name': user_name, 'text': text})\n print(response.status_code)\n print(response.text)\n\n\ndef update_message(message_id: str, user_name: str, text: str):\n response = requests.put(url, json={'id': message_id, 'user_name': user_name, 'text': text})\n print(response.status_code)\n print(response.text)\n\n\ndef delete_message(message_id: str):\n response = requests.delete(f'{url}/{message_id}')\n print(response.status_code)\n print(response.text)\n\n\n# Press the green button in the gutter to run the script.\ndef main():\n while True:\n command = input('command: ')\n\n if command == 'get':\n get_messages()\n\n elif command == 'create':\n user_name = input('user_name: ')\n text = input('text: ')\n create_messages(user_name, text)\n\n elif command == 'update':\n message_id = input('id: ')\n user_name = input('user_name: ')\n text = input('text: ')\n update_message(message_id, user_name, text)\n\n elif command == 'delete':\n message_id = input('id: ')\n delete_message(message_id)\n\n elif command == 'exit':\n break\n\n else:\n print('available commands: get, create, update, delete, exit')\n\nif __name__ == '__main__':\n main()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"Wansuko-cmd/python-handson","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41252904367","text":"import datetime\nimport random\n\nimport silly\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\n\nfrom expenses import models\nfrom expenses.models import Category\n\n\ndef get_random_date():\n while True:\n try:\n return datetime.date(\n random.randint(2010, 2019),\n random.randint(1, 12),\n random.randint(1, 30)\n )\n except ValueError:\n # non existent date\n pass\n\n\ndef get_paragraph(a, b):\n \"\"\"\n Produces a paragraph of text with between a and b sentences.\n \"\"\"\n return \"\\n\".join([silly.sentence() for x in range(random.randint(a, b))])\n\n\nclass Command(BaseCommand):\n help = \"Adds demo data to database.\"\n\n def add_arguments(self, parser):\n parser.add_argument('n', type=int)\n\n def handle(self, *args, **options):\n n = options['n']\n\n users = []\n for i in range(1, 6):\n user, created = User.objects.get_or_create(\n username='user{}'.format(i),\n )\n cats = list(user.categories.all())\n if not cats:\n cats = [user.categories.create(name=silly.noun()) for i in range(5)]\n\n user.set_password(\"secret1234\")\n user.save()\n users.append(user)\n\n for i in range(n):\n with transaction.atomic():\n o = models.Expense(\n user=random.choice(users),\n date=get_random_date(),\n amount=\"{:.2f}\".format(random.uniform(1, 100)),\n title=\"{} {}\".format(silly.adjective(), silly.noun()).title(),\n description=get_paragraph(1, 3),\n )\n o.full_clean()\n o.save()\n sample = set(random.sample(cats, random.randint(1, 3)))\n for cat in sample:\n o.categories.add(cat)\n\n for i in range(random.randint(0, 5)):\n o.comments.create(\n content=get_paragraph(1, 4),\n 
)\n","repo_name":"nonZero/Spending","sub_path":"expenses/management/commands/make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37586372400","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ![](/files/Blueprint_logo.png)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Optimizing Advertising Spend with Machine Learning & Databricks\n# MAGIC * This is a new accelerator from Blueprint\n# MAGIC \n# MAGIC * For additional information how to run these notebooks go to notebook 00_START_HERE\n# MAGIC \n# MAGIC The diagram below will guide you so that you know where you are\n# MAGIC \n# MAGIC **Please Note**: \n# MAGIC \n# MAGIC * This new accelerator is based on original Databricks Accelerator [Solution Accelerator: Multi-touch Attribution](https://databricks.com/blog/2021/08/23/solution-accelerator-multi-touch-attribution.html)\n# MAGIC \n# MAGIC * As of this X-Challenge version the original Databricks data is still required, so the original Databricks notebooks are still required to create the BRONZE and SILVER raw data tables\n\n# COMMAND ----------\n\n# DBTITLE 1,Score Data with Machine Learning and Databricks\n# MAGIC %md\n# MAGIC ### Generate Predictions for\n# MAGIC * Impressions per client per channel\n# MAGIC * CTR per client per channel\n# MAGIC * CVR per client per channel\n# MAGIC * Txns per client\n# MAGIC * Cost per Impressions per channel\n# MAGIC * Cost per click per client\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ![](/files/ad_spend_flow_05.png)\n\n# COMMAND ----------\n\n# MAGIC %run ./99_utils\n\n# COMMAND ----------\n\nparams = get_params()\ndatabase_name = params['database_name']\nraw_data_path = params['raw_data_path']\nbronze_tbl_path = params['bronze_tbl_path']\nprint(database_name)\nprint(raw_data_path)\n\n# COMMAND ----------\n\nprint(database_name)\n_ = spark.sql('USE {}'.format(database_name))\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import lit, when, col\n\n# COMMAND ----------\n\nscoring_data = (spark.sql(\"\"\"select *, NTILE(10) OVER (ORDER BY VISIT_RANK2) AS user_rank\n from val_impr_clicks_and_cost\"\"\")\n .withColumnRenamed('visit_impr', 'impr')\n .withColumnRenamed('sum_visit_impr', 'user_visits')\n .withColumnRenamed('visit_clicks', 'clicks')\n .withColumnRenamed('visit_txns', 'txns')\n .withColumnRenamed('max_cpm', 'cpm')\n .withColumnRenamed('avg_cpi', 'cpi')\n .withColumnRenamed('bid_cpc', 'cpc')\n .withColumnRenamed('impr', 'impr_pred')\n .withColumnRenamed('clicks', 'clicks_pred')\n .withColumnRenamed('txns', 'txns_pred')\n .withColumn('ctr_pred', when( col(\"impr_pred\") > 0 , col(\"clicks_pred\") / col(\"impr_pred\") ).otherwise(0) ) \n .withColumn('cvr_pred', when( col(\"clicks_pred\") > 0 , col(\"txns_pred\") / col(\"clicks_pred\") ).otherwise(0) )\n .withColumnRenamed('gpt', 'gpt_pred')\n .withColumnRenamed('cpi', 'cpi_pred')\n .withColumnRenamed('cpc', 'cpc_pred') \n )\n\npred_cols = [col_name for col_name in scoring_data.columns if 'pred' in col_name]\n#['impr_pred', 'clicks_pred', 'txns_pred', 'cpi_pred', 'cpc_pred', 'gpt_pred', 'ctr_pred', 'cvr_pred']\nuser_cols = ['uid', 'user_rank']\nts_cols = [ 'time', 'month', 'dayofweek', 'weekofyear', 'timestamp']\nbid_unit_cols = ['user_rank', 'channel', 'product_stars']\ngroup_by_cols = ['month', 'weekofyear'] + bid_unit_cols\nkey_cols_str = \", 
\".join(group_by_cols)\nprint(bid_unit_cols)\nprint(scoring_data.columns)\nprint(key_cols_str)\nprint(pred_cols)\nscoring_data_raw = scoring_data.select(group_by_cols + pred_cols)\nscoring_data_raw.createOrReplaceTempView('scoring_data')\n\nscoring_data2 = (spark.sql(\"\"\"\n SELECT *, \n A.sum_ctr_pred / A.cnt AS ctr_pred,\n A.sum_cvr_pred / A.cnt AS cvr_pred\n FROM (\n SELECT month, weekofyear, user_rank, channel, product_stars,\n sum(impr_pred) AS impr_pred,\n sum(clicks_pred) AS clicks_pred,\n sum(txns_pred) AS txns_pred,\n avg(cpi_pred) AS cpi_pred,\n avg(cpc_pred) AS cpc_pred,\n avg(gpt_pred) AS gpt_pred,\n sum(ctr_pred) AS sum_ctr_pred,\n sum(cvr_pred) AS sum_cvr_pred,\n count(*) AS cnt\n FROM scoring_data\n GROUP BY month, weekofyear, user_rank, channel, product_stars\n ) AS A \n \"\"\").drop(*['sum_ctr_pred', 'sum_cvr_pred', 'cnt'])\n )\n\ndisplay(scoring_data2)\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT *, \n# MAGIC A.sum_ctr_pred / A.cnt AS ctr_pred,\n# MAGIC A.sum_cvr_pred / A.cnt AS cvr_pred\n# MAGIC FROM (\n# MAGIC SELECT month, weekofyear, user_rank, channel, product_stars,\n# MAGIC sum(impr_pred) AS impr_pred,\n# MAGIC sum(clicks_pred) AS clicks_pred,\n# MAGIC sum(txns_pred) AS txns_pred,\n# MAGIC avg(cpi_pred) AS cpi_pred,\n# MAGIC avg(cpc_pred) AS cpc_pred,\n# MAGIC avg(gpt_pred) AS gpt_pred,\n# MAGIC sum(ctr_pred) AS sum_ctr_pred,\n# MAGIC sum(cvr_pred) AS sum_cvr_pred,\n# MAGIC count(*) AS cnt\n# MAGIC FROM scoring_data\n# MAGIC GROUP BY month, weekofyear, user_rank, channel, product_stars\n# MAGIC ) AS A \n\n# COMMAND ----------\n\npred_cols = [col_name for col_name in scoring_data.columns if 'pred' in col_name]\nprint(pred_cols)\n\n# COMMAND ----------\n\n# DBTITLE 1,Save to Delta Lake\nscore_table_name = \"score_data_with_pred\"\nout_score_df = scoring_data2\n\n(out_score_df\n .write\n .format(\"delta\")\n .mode(\"overwrite\")\n .saveAsTable(score_table_name)\n)\n\n# COMMAND ----------\n\n\n","repo_name":"BlueprintTechnologies/Blueprint-Databricks-Advertising-Spend-Optimization-Accelerator","sub_path":"X-Challenge-Apr-1/ad_spend_optimizer/05b_score_data_NEW.py","file_name":"05b_score_data_NEW.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1825418342","text":"# МНОЖЕСТВА\n# # set\n# s = set() #пустое множество\n# s1 = {1, 2, 3, 3} #повторения исключаются\n# print(s1)\n# s2 = {'A', 'Б', 'B'} #это неупорядоченный тип данных\n# print(s2)\n# s1 = {1, 2, 3, 4, 5}\n# s2 = {3, 4, 5, 6, 7}\n# print(s1.union(s2)) # обьединение\n# print(s1 | s2) # обьединение\n\n# print(s1.intersection(s2)) # пересечение\n# print(s1 & s2) # пересечение\n\n# print(s1.difference(s2)) # разность\n# print(s1 - s2) # разность\n\n# print(s1.symmetric_difference(s2)) # симметрическая разность\n# print(s1 ^ s2)\n\n\n#CЛОВАРИ\n# d = {} #пустой словарь\n# d1 = {'Пи': 3.14,\n# 'Преподаватель': 'Aнтон',\n# 'Список дел': ['Выжить', 'ловить балдеж']}\n#\n# print(d1['Преподаватель'])\n# print(d1['Список дел'][1])\n\n\n\n# from random import randint\n# lst = []\n# for _ in range (5):\n# randint(1,5)\n# lst.append(randint(1,5))\n# print(lst)\n# unique = set(lst)\n# print(unique)\n#\n# print(f\"{len(unique)} штук: {unique}\")\n\n\n#\n# from random import randint\n#\n# lst1 = []\n# lst2 = []\n#\n# size = randint(100, 1000)\n# r_start = 0\n# r_end = 10_000 \n# for _ in range(size):\n# lst1.append(randint(r_start,r_end))\n# lst2.append(randint(r_start, r_end))\n# set1 = set(lst1)\n# set2 = 
set(lst2)\n#\n# inter = set1.intersection(set2)\n# print(f\"Общих чисел: {len(inter)}\")\n# print(f\"Кол-во генераций: {size}\")\n# print(f\"Минимальное значение: {min(inter)}\")\n# print(f\"Максимальное значение: {max(inter)}\")\n# # возможное решение,но простое\n# inter1 = list(inter)\n# inter1.sort()\n# print(inter1)\n# print(sorted(inter)) # sorter - преобразует в список и сортирует\n\nset1 = set()\ninsert = \"\"\nwhile insert != \"end\":\n insert = input(\"Ввод: \")\n if insert.lstrip(\"-\").isdigit(): #убирпет символ слева\n if insert not in set1:\n print(\"Нет\")\n set1.add(insert)\n else:\n print(\"Да\")\n elif insert == \"end\":\n break\n else:\n print(\"Нужно число\")\n\n\n\n","repo_name":"n-asstya22/pyton","sub_path":"Lesson_13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"576201089","text":"import PIL\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder, Normalizer, MinMaxScaler\n\nlabels = pd.read_csv('label.csv')['label'].values\nle = LabelEncoder()\nle.fit(labels)\nlabels = le.transform(labels)\nfolder_names = le.inverse_transform([0, 1, 2, 3, 4, 5, 6, 7])\nprint(folder_names)\nlabels = np.eye(8)[labels]\nlabels = labels[:3500]\nprint(labels.shape)\n\n\ndef rgb2gray(rgb):\n return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])\n\n\ndef load_image(infilename):\n img = np.asarray(PIL.Image.open(infilename))\n return rgb2gray(img)\n\n\npath = 'dataset/new_train/'\nimages = []\nfor i in range(1, 3501):\n if i % 1000 == 0:\n print(str(i))\n images += [load_image(path + str(i) + \".jpg\")]\n\nimport os\n\nfilenames = []\nfiles = []\npath = os.getcwd() + '/dataset/test_stg1/'\nfor file in os.listdir(path):\n if file.endswith('.jpg'):\n filenames += [path + file]\n files += [file]\n\npath = os.getcwd() + '/dataset/test_stg2/'\nfor file in os.listdir(path):\n if file.endswith('.jpg'):\n filenames += [path + file]\n files += ['test_stg2/' + file]\n\ntest_images = []\n\nfor fn in filenames:\n test_images += [load_image(fn)]\n\nn_classes = 8\nbatch_size = 100\n\nx = tf.placeholder('float', shape=[None, 32, 32, 1])\ny = tf.placeholder('float', shape=[None, 8])\n\nkeep_rate = 0.8\nkeep_prob = tf.placeholder(tf.float32)\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef maxpool2d(x):\n # size of window movement of window\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef convolutional_neural_network(x):\n weights = {'W_conv1': tf.Variable(tf.random_normal([5, 5, 1, 32])),\n 'W_conv2': tf.Variable(tf.random_normal([5, 5, 32, 64])),\n 'W_fc': tf.Variable(tf.random_normal([8 * 8 * 64, 1024])),\n 'out': tf.Variable(tf.random_normal([1024, n_classes]))}\n\n biases = {'b_conv1': tf.Variable(tf.random_normal([32])),\n 'b_conv2': tf.Variable(tf.random_normal([64])),\n 'b_fc': tf.Variable(tf.random_normal([1024])),\n 'out': tf.Variable(tf.random_normal([n_classes]))}\n\n # x = tf.reshape(x, shape=[-1, 32, 32, 1])\n # print(x.shape)\n conv1 = tf.nn.relu(conv2d(x, weights['W_conv1']) + biases['b_conv1'])\n conv1 = maxpool2d(conv1)\n\n conv2 = tf.nn.relu(conv2d(conv1, weights['W_conv2']) + biases['b_conv2'])\n conv2 = maxpool2d(conv2)\n print(conv2.shape)\n fc = tf.reshape(conv2, [-1, 8 * 8 * 64])\n print(fc.shape)\n fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])\n fc = tf.nn.dropout(fc, keep_rate)\n 
print(fc.shape)\n\n output = tf.matmul(fc, weights['out']) + biases['out']\n print(output.shape)\n\n return output\n\n\ndef train_neural_network(x):\n global accuracy\n prediction = convolutional_neural_network(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))\n optimizer = tf.train.AdamOptimizer().minimize(cost)\n\n hm_epochs = 5\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(hm_epochs):\n i = 0\n epoch_loss = 0\n while i < len(images):\n start = i\n end = i + batch_size\n batch_x = np.array(images[start:end])\n batch_y = np.array(labels[start:end])\n batch_x = batch_x.reshape(-1, 32, 32, 1)\n batch_y = batch_y.reshape(-1, 8)\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n y: batch_y})\n epoch_loss += c\n i += batch_size\n print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)\n correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n img = np.array(images).reshape(3500, 32, 32, 1)\n print('Accuracy:', accuracy.eval({x: img, y: labels}))\n test = np.array(test_images).reshape(len(test_images), 32, 32, 1)\n new_arr = []\n pred = tf.arg_max(prediction, 1)\n arr = pred.eval(feed_dict={x: test})\n for a in arr:\n na = np.zeros(8)\n na[a] = 1\n new_arr += [na]\n out = pd.DataFrame(new_arr, columns=folder_names)\n out['image'] = pd.Series(files)\n out.to_csv('output.csv', index=False)\n\n\ntrain_neural_network(x)\n","repo_name":"wahyuprihantoro/fishery","sub_path":"sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31648988263","text":"# SNEAKS - Snooping Early Alert Knowledge Service\n# - *- coding: utf- 8 - *- .\n__author__ = 'Xaime'\n\nimport os\nimport sys\nimport importlib\nimport logging\nfrom datetime import datetime, timedelta\nfrom ConfigParser import RawConfigParser\nimport codecs\nfrom modules.report import *\nfrom operator import itemgetter\nimport pythoncom\nimport win32serviceutil\nimport win32service\nimport win32event\nimport servicemanager\nimport socket\nimport time\n\n\nclass AppServerSvc (win32serviceutil.ServiceFramework):\n \"\"\"\n Esta es la clase que se usa para que sneaks funcione como un servicio de windows.\n\n Es el único código dependiente del OS, con lo que portar la aplicación a UNIX solo\n implica sacar el código de main() y colocarlo dentro del código que implemente\n la especificación PEP 3143, \"Standard daemon process library\". 
Hay decenas de librerías\n en PyPI que pueden usarse para realizar ésto en muy pocas lineas.\n \"\"\"\n \"\"\"\n \"\"\"\n _svc_name_ = \"SNEAKS\"\n _svc_display_name_ = \"SNEAKS\"\n _svc_description_ = \"Snooping Early Alert Knowledge Service\"\n\n def __init__(self, args):\n win32serviceutil.ServiceFramework.__init__(self, args)\n self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)\n socket.setdefaulttimeout(60)\n self.service_stop = False\n\n def SvcStop(self):\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n win32event.SetEvent(self.hWaitStop)\n self.service_stop = True\n\n def SvcDoRun(self):\n servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED,\n (self._svc_name_, ''))\n self.main()\n\n def main(self):\n\n class Person:\n def __init__(self, individual):\n self.person = individual\n self.datasources = []\n self.eval_total = []\n self.fetched_data = []\n self.ips = []\n self.detection_data = []\n self.detection = False\n self.alarmed = False\n self.name = u''\n self.notify = False\n self.alarm_threshold = 10\n self.email = ''\n self.prev_alarm_level = 0\n self.enabled = self.__read_config()\n if not self.enabled:\n logger.critical('Error leyendo la configuración [general] de %s', self.person)\n exit(1)\n\n def __read_config(self):\n \"\"\"\n Obtiene la configuración de la persona de su archivo .person y devuelve True si es válida\n \"\"\"\n pparser = RawConfigParser()\n with codecs.open(\"config/\" + self.person + '.person', 'r', encoding='utf-8') as cf:\n pparser.readfp(cf)\n if pparser.has_section(\"general\"):\n if pparser.has_option('general', 'name'):\n self.name = pparser.get('general', 'name')\n if self.name == '':\n return False\n else:\n return False\n if pparser.has_option('general', 'notify'):\n self.notify = pparser.getboolean('general', 'notify')\n if pparser.has_option('general', 'alarm_threshold'):\n self.alarm_threshold = pparser.getint('general', 'alarm_threshold')\n if pparser.has_option('general', 'email'):\n self.email = pparser.get('general', 'email')\n if self.email == '' and self.notify:\n return False\n else:\n return False\n return True\n\n def get_data(self, downloaded):\n \"\"\"\n Se obtienen/descargan los datos de cada uno de los plugins\n \"\"\"\n self.fetched_data = downloaded\n for datasource in self.datasources:\n if datasource.enabled:\n datasource.get_data(self.fetched_data)\n for fetched in datasource.fetched_data:\n if not self.fetched_data.count(fetched):\n self.fetched_data.append(fetched) # Consolidamos la lista de datos ya descargados\n\n def get_ips(self, ctime_frame, cgiven_time):\n \"\"\"\n Se obtienen las ips detectadas por los plugins. 
Algunas ya vendrán marcadas como positivos, otras\n se marcarán como positivas al ser detectadas por dos o más plugins.\n \"\"\"\n self.ips = []\n for datasource in self.datasources:\n if datasource.enabled:\n ips = datasource.get_ips(ctime_frame, cgiven_time)\n only_ips = [a[0] for a in ips]\n person_only_ips = [b[0] for b in self.ips]\n for ip in only_ips: # Consolidamos una lista de ips detectadas\n if person_only_ips.count(ip):\n self.ips[person_only_ips.index(ip)][1] = True # IP detectada 2 veces, positiva\n else:\n self.ips.append(ips[only_ips.index(ip)]) # IP nueva, la añadimos\n return self.ips\n\n def eval_data(self, ctime_frame, canalyzed_time, cgiven_time, cconfirmed_ips):\n self.detection_data = [0] * canalyzed_time\n for datasource in self.datasources:\n if datasource.enabled:\n data = datasource.eval_data(ctime_frame, canalyzed_time, cgiven_time, cconfirmed_ips)\n for x in range(canalyzed_time):\n self.detection_data[x] += data[x] # Suma de todas las evaluaciones de los plugins\n max_detection = max(self.detection_data)\n if max_detection:\n self.detection = True\n if max_detection >= person.alarm_threshold:\n self.alarmed = True\n else:\n self.alarmed = False\n else:\n self.detection = False\n\n def get_report_data(self, ctime_frame, cgiven_time, cconfirmed_ips):\n report_data = []\n for datasource in self.datasources:\n if datasource.enabled:\n data = datasource.get_report_data(ctime_frame, cgiven_time, cconfirmed_ips)\n if data:\n dataplusname = [n + [self.name] for n in data] # se añade el nombre de la persona\n report_data += dataplusname\n return sorted(report_data, key=itemgetter(0))\n\n def get_positive_ips(iinterval, itime, ipeople):\n \"\"\"\n Se obtienen las ips detectadas para cada persona. Algunas ya vendrán marcadas como positivos, otras\n se marcarán como positivas al ser detectadas para dos o más personas.\n \"\"\"\n people_ips = []\n for iperson in ipeople:\n iperson.get_ips(iinterval, itime)\n person_only_ips = [a[0] for a in iperson.ips]\n people_only_ips = [b[0] for b in people_ips]\n for ip in person_only_ips: # Consolidamos una lista de ips detectadas\n if people_only_ips.count(ip):\n people_ips[people_only_ips.index(ip)][1] = True # IP detectada 2 veces, se marca como positivo\n else:\n people_ips.append(iperson.ips[person_only_ips.index(ip)]) # IP nueva, la añadimos\n\n ipositive_ips = []\n for ip in people_ips:\n if ip[1]:\n ipositive_ips.append(ip[0]) # Se genera una lista solo con IPs marcadas como positivos\n\n return ipositive_ips\n\n os.chdir(os.path.dirname(os.path.realpath(__file__))) # Se fija el directorio de trabajo\n\n # Se inicia el log de la aplicación\n\n logger = logging.getLogger('sneaks')\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler('log/sneaks.log')\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Se lee sneaks.conf\n parser = RawConfigParser()\n time_frame = 240\n check_interval = 60\n org_alarm_threshold = 10\n min_people_alarm = 2\n admin_email = ''\n with codecs.open('config/sneaks.conf', 'r', encoding='utf-8') as f:\n parser.readfp(f)\n if parser.has_section(\"general\"):\n if parser.has_option('general', 'time_frame'):\n time_frame = parser.getint('general', 'time_frame')\n if time_frame < 1 or time_frame > 1440:\n logger.critical('Error en sneaks.conf: time_frame')\n exit(1)\n if parser.has_option('general', 'check_interval'):\n check_interval = parser.getint('general', 
'check_interval')\n if check_interval < 1 or check_interval > time_frame:\n logger.critical('Error en sneaks.conf: check_interval')\n exit(1)\n if parser.has_option('general', 'org_alarm_threshold'):\n org_alarm_threshold = parser.getint('general', 'org_alarm_threshold')\n if parser.has_option('general', 'min_people_alarm'):\n min_people_alarm = parser.getint('general', 'min_people_alarm')\n if parser.has_option('general', 'admin_email'):\n admin_email = parser.get('general', 'admin_email')\n if admin_email == '':\n logger.critical('Error en sneaks.conf: admin_email')\n exit(1)\n\n else:\n logger.critical(u'Error en sneaks.conf: falta la sección [general]')\n exit(1)\n\n # Busca módulos en el directorio plugins los importa. Crea una lista con los plugins importados.\n try:\n plugin_dir = os.listdir('plugins')\n plugin_dir.remove('__init__.py')\n i = 0\n while i < len(plugin_dir):\n if plugin_dir[i].endswith('.py'):\n plugin_dir[i] = 'plugins.' + plugin_dir[i][:-3]\n i += 1\n else:\n plugin_dir.pop(i)\n plugin_dir.sort()\n plugin_list = map(importlib.import_module, plugin_dir)\n except:\n logger.critical('Fallo en la carga de plugins')\n sys.exit(1)\n\n # Busca ficheros de configuración de persona en el directorio config y devuelve una lista con sus nombres\n person_list = os.listdir('config')\n if person_list != []:\n i = 0\n while i < len(person_list):\n if person_list[i].endswith('.person'):\n logger.info('%s detectado', person_list[i])\n person_list[i] = person_list[i][:-7]\n i += 1\n else:\n person_list.pop(i)\n else:\n logger.critical(u'No se encuentra ningún archivo de configuración .person')\n exit(1)\n\n # Se crea la lista de objetos de clase Person. Cada uno se corresponde con una persona vigilada.\n people = []\n for user in person_list:\n people.append(Person(user))\n\n # Se asocia una lista de objetos de clase [plugin].Datasource a cada persona,\n # aunque solo se usarán aquellos activados\n for person in people:\n for plugin in plugin_list:\n person.datasources.append(plugin.DataSource(person.person))\n\n temptime = datetime.utcnow()\n timenow = temptime - timedelta(seconds=temptime.second) # redondeo al segundo 00\n prev_time = datetime(1, 1, 1, 1, 1, 1)\n prev_alarm_level = 0\n\n while not self.service_stop:\n\n if timenow > prev_time + timedelta(minutes=check_interval):\n\n logger.debug(u\"Comenzando comprobaciones de SNEAKS\")\n\n downloaded_data = []\n\n # Se obtienen/descargan los datos de cada persona\n for person in people:\n person.get_data(downloaded_data)\n for fet in person.fetched_data:\n if not downloaded_data.count(fet):\n downloaded_data.append(fet) # Consolidamos la lista de datos ya descargados\n\n positive_ips = get_positive_ips(time_frame + check_interval, timenow, people)\n\n # Se generan las las puntuaciones llamando al método eval data de cada persona. Solo se tomarán en\n # cuenta los eventos de IPs positivas, que añadirán su puntuación asociada.\n # Las puntuaciones son una lista con un elemento por cada uno de los últimos 'check_interval'\n # minutos antes de la hora actual. 
Cada elemento de la lista devuelta contiene el valor acumulado de\n # las detecciones durante el intervalo previo 'timeframe'\n\n detected_people = 0\n org_eval_data = [0] * check_interval\n for person in people:\n person.eval_data(time_frame, check_interval, timenow, positive_ips)\n if person.detection:\n detected_people += 1\n for j in range(check_interval):\n org_eval_data[j] += person.detection_data[j]\n alarm_level = max(org_eval_data)\n\n # Si se supera el úmbral se envía email con el informe al administrador\n graph_generated = False\n if alarm_level >= org_alarm_threshold and detected_people >= min_people_alarm:\n\n logger.info(u\"ALARMA nivel \" + str(alarm_level) + u\" de footprintg para la organización\")\n\n # si el nivel de alarma sube o abandona la zona de alarma\n if (org_eval_data[-1] > prev_alarm_level) or (org_eval_data[-1] < org_alarm_threshold):\n\n prev_alarm_level = alarm_level\n\n # Se generan los gráficos\n org_report_chart(time_frame, check_interval, timenow, positive_ips,\n people, org_alarm_threshold, plugin_dir)\n graph_generated = True\n\n # Se genera el informe de la alarma de la organización\n save_org_report(time_frame, check_interval, timenow, positive_ips, people,\n org_alarm_threshold, plugin_dir, \"temp/orgreport.html\")\n\n # Se envía un email con el informe de la alarma al administrador\n send_report_mail(admin_email, \"temp/orgreport.html\",\n 'Alarma de footprinting ' + timenow.strftime(\"%Y%m%d%H%M\"))\n\n else:\n logger.debug(u\"Nivel de footprinting de la organización: \"\n + str(alarm_level) + u\". No hay alarma.\")\n\n # Se comprueba si es necesario notificar a algún usuario\n for person in people:\n if person.alarmed and person.notify:\n\n person_alarm_level = max(person.detection_data)\n\n logger.info(u\"ALARMA nivel \" + str(person_alarm_level) + u\" de footprintg para \" + person.name)\n\n # si el nivel de alarma sube o abandona la zona de alarma\n if (person.detection_data[-1] > person.prev_alarm_level) or \\\n (person.detection_data[-1] < person_alarm_level):\n\n person.prev_alarm_level = person_alarm_level\n\n if not graph_generated: # No se han generado los gráficos\n # Se generan los gráficos\n org_report_chart(time_frame, check_interval, timenow, positive_ips, people,\n org_alarm_threshold, plugin_dir)\n\n # Se genera el informe de la alarma de la persona\n save_person_report(time_frame, check_interval, timenow, positive_ips, person, plugin_dir,\n \"temp/\" + person.person + \"-report.html\")\n\n # Se envía un email con el informe de la alarma a la persona\n send_report_mail(person.email, \"temp/\" + person.person + \"-report.html\",\n 'Alarma de footprinting ' + timenow.strftime(\"%Y%m%d%H%M\"))\n\n else:\n logger.debug(u\"Nivel de footprinting de \" + person.name + u\": \" +\n str(max(person.detection_data)) + u\". 
No hay alarma.\")\n\n prev_time = timenow\n\n else:\n time.sleep(15)\n temptime = datetime.utcnow()\n timenow = temptime - timedelta(seconds=temptime.second)\n\n\nif __name__ == '__main__':\n win32serviceutil.HandleCommandLine(AppServerSvc)","repo_name":"xaime/sneaks","sub_path":"sneaks.py","file_name":"sneaks.py","file_ext":"py","file_size_in_byte":17561,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1863349862","text":"import json\nimport numpy as np\n\nclass Py2Vec(object):\n \"\"\"Load a Py2Vec JSON model and provide access to the vectors and vector\n space.\n Provides access to word vectors as if it were a dictionary:\n py2vec = Py2Vec('file')\n py2vec['word']\n Unrecognized words will return the Null vector (all 0).\n Also provides a way to find the closest word to a vector in the vector\n space.\n Args:\n json_file (str): Location of the JSON Py2Vec file.\n Attributes:\n null_vector (numpy array): The null (0) vector as a numpy array. It has\n the correct size for the model's vector space.\n vector_size (int): The number of dimensions in the vector space.\n \"\"\"\n def __init__(self, json_file):\n self.__model = {}\n self.__line_to_word = {}\n space = []\n # Load the Py2Vec data from a file\n with open(json_file, 'r') as open_file:\n tmp_model = json.load(open_file)\n self.__model = {k: np.array(v) for k, v in tmp_model.iteritems()}\n\n for line_number, key_word in enumerate(self.__model):\n vector = self.__model[key_word]\n self.__line_to_word[line_number] = key_word\n space.append(vector)\n\n # Set up a vector space so we can quickly find the closest vector\n self.__vector_space = np.array(space)\n\n # Null vector for unrecognized words\n self.vector_size = len(vector)\n self.null_vector = np.zeros(self.vector_size)\n\n def get_model(self):\n \"\"\"Return the dictionary model that is used for the code content vectorizer functions (kaggle, git)\n Args:\n None\n Returns:\n __model: The trained py2vec model as a dictionary\n \"\"\"\n return self.__model\n\n def __getitem__(self, key):\n \"\"\"Return the vector representation of a word.\n Args:\n key (str): A word to locate in the vector space.\n Returns:\n numpy array: The location of the word in the vector space, or the\n null (0) vector if the word is not found.\n \"\"\"\n return self.__model.get(key, self.null_vector)\n\n def closest_words(self, input_arg, n=1):\n \"\"\"Return the n closest word to a given vector.\n Args:\n input_arg (str or numpy array): Either a string of a word in the\n model, or a vector of the same dimension as the vector space.\n n (Optional[int]): The number of values to return. Defaults to 1.\n Returns:\n list of tuples: A list containing tuples of the form:\n (distance, word). 
None is returned if a string was provided as\n an argument that is not in the model.\n \"\"\"\n # If you gave us a word, find the vector, otherwise if the word is not\n # in the model return None.\n if isinstance(input_arg, basestring):\n key = input_arg.lower()\n vector = self.__model.get(key, None)\n if vector is None:\n return None\n else:\n vector = input_arg\n\n # Find the closest vectors, note that we use n+1 because we sometimes\n # discard the vector with distance == 0 and we still want to have n\n # results.\n squares = (self.__vector_space - vector)**2\n distances = np.sum(squares, axis=1)\n line_numbers = np.argpartition(distances, n+1)[:n+1]\n\n # argpartition partitions the list around the nth element, but does not\n # guarantee the order is correct, so we have to sort.\n output = []\n for line_number in line_numbers:\n dist = distances[line_number]\n # Throw out identical vectors, there should be only one\n if dist == 0:\n continue\n\n word = self.__line_to_word[line_number]\n output.append((round(dist, 3), word))\n\n return sorted(output)[:n]\n","repo_name":"Lab41/hermes","sub_path":"src/utils/Py2Vec.py","file_name":"Py2Vec.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"21"} +{"seq_id":"25743967606","text":"# 05\n# Medium\n\n# Word Ladder Problem: You are given 2 words A and B, both of the same length. Your task is to \n# transform one word to another changing only one letter each time. Each intermediate word should \n# be a valid word in the dictionary. Print out the length of the path. \n\n# (Alternate version: print out the intermediate words)\n\n# A = CAB, B = DOG\n# Result: 4 (CAB -> COB -> COG -> DOG)\n\n\n# -----------------------------------------------------\n\n# Time Complexity:\n# * All Words Approach: O(26^N), because in the worst case, you will go through every word of size n.\n# * Optimized Approach: O(N * M), where N is the number of words in the dictionary and M is the length of\n# the longest word. This time is taken to build the pre-processing map.\n# The BFS takes O(N * M) in this case, where N is the number of words in the dictionary and M is the\n# length of the longest word. This happens when we go through all the words.\n\n# Space Complexity:\n# * All Words Approach: O(26^N), because we store every node in the queue.\n# * Optimized Approach: O(N * M), where N is the number of words in the dictionary and M is the length of\n# the longest word. 
This space is taken on the pre-processing map.\n\nfrom collections import deque\n\nclass WordNode(object):\n def __init__(self, word: str):\n self.word = word\n self.wc_list = [None] * len(word)\n for i in range(len(word)):\n self.wc_list[i] = f\"{word[:i]}*{word[i+1:]}\"\n self.path = []\n\n def __repr__(self) -> str:\n return self.word\n\ndef generate_wc_map(valid_words: list) -> dict:\n dc = {}\n for w in valid_words:\n wn = WordNode(w)\n for wc in wn.wc_list:\n if wc in dc:\n dc[wc].append(wn.word)\n else:\n dc[wc] = [wn.word]\n return dc\n\ndef word_ladder_bfs(start_w: str, target_w: str, valid_words: list) -> list:\n wcs_to_words = generate_wc_map(valid_words)\n start_node = WordNode(start_w)\n start_node.path = [start_w]\n q = deque()\n q.appendleft(start_node)\n while q:\n source_node = q.pop()\n if source_node.word == target_w:\n return source_node.path\n for wc in source_node.wc_list:\n for word in wcs_to_words[wc]:\n if word in source_node.path:\n continue\n word_node = WordNode(word)\n # NOTE: this is where I've made the same mistake a couple times: lists are passed by reference, unless mutated during course of assignment as below\n word_node.path = source_node.path + [word_node.word]\n q.appendleft(word_node)\n\n# -----------------------------------------------------\n\nimport pytest\n\ndef test_word_ladder():\n valid_words = ['dog', 'dot', 'cog', 'dig', 'fog', 'bog', 'cop', 'cob', 'con', 'did', 'fig', 'pig', 'big', 'can', 'ton', 'top', 'dad', 'dud', 'pin', 'fin', 'bin', 'ban', 'pan', 'tap', 'pad', 'tab', 'cab'] \n start_word = 'dog'\n target_word = 'cab'\n path = ['dog', 'cog', 'cob', 'cab']\n assert(word_ladder_bfs(start_word, target_word, valid_words) == path)\n\npytest.main()\n","repo_name":"dannynoonan/interview-prep","sub_path":"ic/15_graphs/05_word_ladder.py","file_name":"05_word_ladder.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75027576053","text":"#!/usr/bin/env python2\n\nimport rospy\nfrom std_srvs.srv import Empty\n\nrospy.init_node('myagv_navigation')\n\npause_service = rospy.ServiceProxy('/pause_navigation', Empty)\n\npaused = False\n\ndef toggle_paused():\n global paused\n paused = not paused\n if paused:\n rospy.loginfo('Pausing navigation')\n pause_service()\n else:\n rospy.loginfo('Resuming navigation')\n\nrospy.Timer(rospy.Duration(0.1), lambda event: None)\n\nrospy.sleep(1) # Wait for services to be registered\n\nrospy.loginfo('Press spacebar to toggle navigation state')\n\nwhile not rospy.is_shutdown():\n key = raw_input()\n if key == ' ':\n toggle_paused()\n elif key == 'q':\n break\n","repo_name":"elephantrobotics/myagv_ros","sub_path":"build/myagv_navigation/catkin_generated/installspace/stop_resume_navigation1.py","file_name":"stop_resume_navigation1.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73280466931","text":"from matplotlib.patches import Rectangle\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom blockhead.interval_tree import preOrderTraversal\n\n\ndef default_display(series, scale_space, interval_tree, figsize=(10, 12)):\n \"\"\" A default display showing the series with the associated\n scale space segmentation.\n\n Parameters\n ----------\n\n series: array\n The time series.\n\n scale_space: dict\n The associated scale space (see utils.create_scale_space).\n\n interval_tree: IntervalNode\n The root node of the 
interval tree (see interval_tree).\n\n Returns\n -------\n\n fig: matplotlib figure\n \"\"\"\n # Display the image and plot all contours found\n fig, ax = plt.subplots(1, 2, figsize=figsize, sharey=True)\n tune = 1\n\n ax[0].plot(series, np.arange(len(series)))\n\n extent = [0, 50, len(series) - 1, 0]\n ax[1].imshow(\n scale_space[\"second\"],\n aspect='auto',\n vmin=-tune,\n vmax=tune,\n cmap='seismic',\n extent=extent,\n alpha=1)\n\n contour_list = interval_tree[\"contour_list\"]\n for contour in contour_list:\n ax[1].plot(\n contour['data'][:, 1], contour['data'][:, 0], linewidth=1, alpha=1)\n\n rect_list = preOrderTraversal(interval_tree[\"root\"])\n for rect in rect_list:\n bottom = rect['bottom']\n top = rect['top']\n scale = rect['scale']\n rect = Rectangle(\n (0, top), scale, bottom-top, linewidth=1, edgecolor='k',\n fill=False)\n ax[1].add_patch(rect)\n\n return fig\n","repo_name":"brendonhall/blockhead","sub_path":"blockhead/visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73790950451","text":"\nimport numpy as np\nimport time\nimport itertools\nimport torch\nfrom collections import Counter, defaultdict\nimport torch.nn as nn\nfrom torch_geometric.utils import *\nfrom QNetwork import Memory, EstimatorNetwork, Selector\nimport tqdm\nfrom collections import namedtuple\nfrom copy import deepcopy\nimport random\nimport torch.nn.functional as F\nfrom sklearn import preprocessing\nfrom sklearn.metrics import f1_score\n\nclass DepthMemory(Memory):\n def __init__(self, *args, **kwars):\n super(DepthMemory, self).__init__(*args, **kwars)\n \n def fed_reward(self, reward):\n for index, r in zip(range(self.fed_reward_index, len(self.memory)), reward):\n self.memory[index].reward = r.expand(self.memory[index].state.shape[0]).view(-1, 1)\n self.fed_reward_index = len(self.memory)\n\nclass DepthSelector(Selector):\n def __init__(self, *args, **kwargs):\n super(DepthSelector, self).__init__(*args, **kwargs)\n self.max_k_hop = self.action_num\n self.qnet = EstimatorNetwork(self.max_k_hop, self.state_shape, self.mlp_layers, self.device)\n self.qnet.eval()\n for p in self.qnet.parameters():\n if len(p.data.shape) > 1:\n nn.init.xavier_uniform_(p.data)\n self.memory = DepthMemory(self.replay_memory_size, self.batch_size)\n self.mse_loss = nn.MSELoss(reduction='mean')\n self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.lr)\n\n \n def predict(self, node_list, graph_embedding, graph):\n neighbor_embedding_list = []\n for center_node in node_list:\n # one_hop_nodes = set(k_hop_subgraph(int(center_node), 1, graph.edge_index)[0].numpy()) - set([int(center_node)])\n one_hop_nodes = set(k_hop_subgraph(int(center_node), self.max_k_hop, graph.edge_index)[0].numpy()) - set([int(center_node)])\n if len(one_hop_nodes):\n neighbor_embedding_list.append(graph_embedding[list(one_hop_nodes)].mean(0))\n else:\n neighbor_embedding_list.append(graph_embedding[center_node])\n center_node_embedding = torch.cat((graph_embedding[node_list], torch.stack(neighbor_embedding_list)), dim=1)\n self.qnet.eval()\n with torch.no_grad():\n depth_prob = self.qnet(center_node_embedding)\n if self._train:\n self.memory.save(center_node_embedding, depth_prob)\n depth_prob = F.softmax(depth_prob, dim=1)\n assert len(torch.isnan(depth_prob).nonzero()) == 0\n return depth_prob\n \n def train(self, t):\n self.qnet.train()\n self.optimizer.zero_grad()\n state, action, reward = 
self.memory.sample()\n t.eval()\n with torch.no_grad():\n target_action = t.forward(state)\n a = torch.argmax(target_action, dim=-1)\n r = (torch.ones_like(reward)*-0.5).masked_fill_(reward, 0.5).squeeze()\n y = r + self.discount_factor * target_action.max(1)[0]\n q = self.qnet(state)\n Q = torch.gather(q, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)\n loss = self.mse_loss(Q, y)\n self.optimizer.step()\n return loss.item()\n\n \n ","repo_name":"RingBDStack/AdaSNN","sub_path":"model/depth_selector.py","file_name":"depth_selector.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"5835043401","text":"import pandas as pd\nimport nltk\nimport pymorphy2\nimport string\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import SnowballStemmer\n\nnltk.download('punkt')\nnltk.download('stopwords')\n\nmorph = pymorphy2.MorphAnalyzer()\n\nrussian_stop_words = stopwords.words(\"russian\") + list(string.punctuation + \"»«-''\\\\/``1234567890\")\nsnowball = SnowballStemmer(language=\"russian\")\ntable1 = pd.read_excel(\"./Реестр проектов.xlsx\").iloc[4:996]\ntable2 = pd.read_excel(\"./Перечень открытых запросов.xlsx\").iloc[2:]\n\n\ndef tokenize_sentence(sentence):\n tokens_arr = word_tokenize(sentence, language=\"russian\")\n tokens_arr = [i for i in tokens_arr if i not in russian_stop_words]\n tokens_arr = [morph.parse(i)[0].normal_form for i in tokens_arr]\n return tokens_arr\n\n\ndef get_match_count(s1, s2):\n return len(set(s1).intersection(set(s2))) # need optimize\n\n\ndef get_best_variants(tokens_arr, sentence):\n r = tokenize_sentence(sentence)\n res = []\n for n, elem in enumerate(tokens_arr):\n res.append((get_match_count(r, elem), elem, n))\n res.sort(reverse=True)\n\n return res\n\n\ntokens = []\nfor line in table1.values:\n tokens.append(line[1] + line[2] if type(line[2]) == str else line[1])\n tokens[-1] = tokens[-1].replace(\"ё\", \"е\").replace(\"Ё\", \"Е\")\n tokens[-1] = tokenize_sentence(tokens[-1])\n\n\ndef get_variants(sentence, limit): # need edit, make table1 as field in model\n variants = get_best_variants(tokens, sentence)\n return [list(table1.iloc[i[2]].values) + [i[0] / float(len(i[1]))] for i in variants[:limit]]\n\n\ntokens2 = []\nfor line in table2.values:\n tokens2.append(line[2])\n tokens2[-1] = tokens2[-1].replace(\"ё\", \"е\").replace(\"Ё\", \"Е\")\n tokens2[-1] = tokenize_sentence(tokens2[-1])\n\ndef get_open_requests(sentence, limit=3):\n variants = get_best_variants(tokens2, sentence)\n return [list(table2.iloc[i[2]].values) + [i[0] / float(len(i[1]))] for i in variants[:limit]]\n","repo_name":"Dimus99/cerebra_ksaviera","sub_path":"api/controller_words.py","file_name":"controller_words.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10404839699","text":"import pickle\nfrom collections.abc import Mapping, MutableMapping\nfrom copy import deepcopy\nfrom typing import Iterator, List\n\nfrom crabwalk import Types\n\n\ndef test_mapping() -> None:\n instance = Types()\n assert issubclass(Types, MutableMapping)\n assert issubclass(Types, Mapping)\n assert isinstance(instance, MutableMapping)\n assert isinstance(instance, Mapping)\n\n\ndef test_new() -> None:\n types = Types({\"py\": [\"*.py\"]})\n assert types[\"py\"] == (\"*.py\",)\n\n class A:\n def keys(self) -> Iterator[str]:\n yield \"rust\"\n\n def __getitem__(self, 
key: str) -> List[str]:\n            if key == \"rust\":\n                return [\"*.rs\"]\n            else:\n                raise KeyError(key)\n\n    types = Types(A())\n    assert types[\"rust\"] == (\"*.rs\",)\n\n    types = Types([(\"js\", [\"*.js\"])])\n    assert types[\"js\"] == (\"*.js\",)\n\n    types = Types(ts=[\"*.ts\"])\n    assert types[\"ts\"] == (\"*.ts\",)\n\n\ndef test_update() -> None:\n    types = Types()\n    types.update({\"py\": [\"*.py\"]})\n    assert types[\"py\"] == (\"*.py\",)\n\n    class A:\n        def keys(self) -> Iterator[str]:\n            yield \"rust\"\n\n        def __getitem__(self, key: str) -> List[str]:\n            if key == \"rust\":\n                return [\"*.rs\"]\n            else:\n                raise KeyError(key)\n\n    types = Types()\n    types.update(A())\n    assert types[\"rust\"] == (\"*.rs\",)\n\n    types = Types()\n    types.update([(\"js\", [\"*.js\"])])\n    assert types[\"js\"] == (\"*.js\",)\n\n    types = Types()\n    types.update(ts=[\"*.ts\"])\n    assert types[\"ts\"] == (\"*.ts\",)\n\n\ndef test_add() -> None:\n    types = Types()\n    assert \"py\" not in types\n    types.add(\"py\", \"*.py\")\n    assert types[\"py\"] == (\"*.py\",)\n\n\ndef test_add_defaults() -> None:\n    types = Types()\n    types.add_defaults()\n    assert types[\"rust\"] == (\"*.rs\",)\n\n\ndef test_copy() -> None:\n    types = Types()\n    types[\"py\"] = (\"*.py\",)\n    t2 = deepcopy(types)\n    assert dict(types) == dict(t2)\n\n\ndef test_pickle() -> None:\n    types = Types()\n    types.select(\"rust\")\n    t2 = pickle.loads(pickle.dumps(types))\n    assert dict(types) == dict(t2)\n    assert t2.__getstate__() == {\"selections\": [(\"select\", \"rust\")]}\n","repo_name":"RazerM/crabwalk","sub_path":"tests/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4309216223","text":"from core import *\n\n\ndef init_scraper1():\n    url = \"http://olympus.realpython.org/profiles/aphrodite\"\n    page = urlopen(url)\n\n    html_bytes = page.read()\n    html = html_bytes.decode(\"utf-8\")\n    # print(html)\n    title_index = html.find(\"<title>\")\n\n    start_index = title_index + len(\"<title>\")\n    end_index = html.find(\"</title>\")\n    title = html[start_index:end_index]\n    print(title)\n\n\ndef init_scraper2():\n    url = \"http://olympus.realpython.org/profiles/poseidon\"\n    page = urlopen(url)\n    html = page.read().decode(\"utf-8\")\n    start_index = html.find(\"
<title>\") + len(\"<title>
\")\n end_index = html.find(\"\")\n title = html[start_index:end_index]\n print(title)\n\n\ndef re_scraper():\n string = \"Everything is if it's in .\"\n string = re.sub(\"<.*?>\", \"ELEPHANTS\", string)\n print(string)\n\n\ndef re_with_scraper():\n url = \"http://olympus.realpython.org/profiles/dionysus\"\n page = urlopen(url)\n html = page.read().decode(\"utf-8\")\n\n pattern = \".*?\"\n match_results = re.search(pattern, html, re.IGNORECASE)\n title = match_results.group()\n title = re.sub(\"<.*?>\", \"\", title) # Remove HTML tags\n\n print(title)\n\n\ndef exercise_scraper():\n url = \"http://olympus.realpython.org/profiles/dionysus\"\n page = urlopen(url)\n html = page.read().decode(\"utf-8\")\n # print(html)\n start_index = html.find(\"Favorite Color:\") + len(\"Favorite Color:\")\n end_index = html.find(\"\")\n title = html[start_index:end_index]\n print(title.strip())\n\n\ndef exercise_scraper_solution():\n url = \"http://olympus.realpython.org/profiles/dionysus\"\n html_page = urlopen(url)\n html_text = html_page.read().decode(\"utf-8\")\n\n for string in [\"Name: \", \"Favorite Color:\"]:\n string_start_idx = html_text.find(string)\n text_start_idx = string_start_idx + len(string)\n\n next_html_tag_offset = html_text[text_start_idx:].find(\"<\")\n text_end_idx = text_start_idx + next_html_tag_offset\n\n raw_text = html_text[text_start_idx: text_end_idx]\n clean_text = raw_text.strip(\" \\r\\n\\t\")\n print(clean_text)\n\n\ndef exercie_with_beautifulsoup():\n base_url = \"http://olympus.realpython.org\"\n html_page = urlopen(base_url + \"/profiles\")\n html_text = html_page.read().decode(\"utf-8\")\n soup = BeautifulSoup(html_text, \"html.parser\")\n\n for link in soup.find_all(\"a\"):\n link_url = base_url + link[\"href\"]\n print(link_url)\n\n\ndef scraper_poc():\n url = \"http://olympus.realpython.org\"\n page = urlopen(url + '/profiles')\n html = page.read().decode(\"utf-8\")\n soup = BeautifulSoup(html, \"html.parser\")\n links = soup.find_all('a')\n for tag_a in links:\n print(\"{}{}\".format(url, tag_a['href']))\n\n\ndef create_browser():\n browser = mechanicalsoup.Browser()\n url = \"http://olympus.realpython.org/login\"\n page = browser.get(url)\n print(page)\n\n\ndef open_browser_and_login():\n # 1\n browser = mechanicalsoup.Browser()\n url = \"http://olympus.realpython.org/login\"\n login_page = browser.get(url)\n login_html = login_page.soup\n\n # 2\n form = login_html.select(\"form\")[0]\n form.select(\"input\")[0][\"value\"] = \"zeus\"\n form.select(\"input\")[1][\"value\"] = \"ThunderDude\"\n\n # 3\n profiles_page = browser.submit(form, login_page.url)\n print(profiles_page.url)\n\n links = profiles_page.soup.select(\"a\")\n links = profiles_page.soup.select(\"a\")\n\n for link in links:\n address = link[\"href\"]\n text = link.text\n print(f\"{text}: {address}\")\n\n\ndef exercise_mechanical_soup():\n env = {'user': \"zeus\", \"pass\": \"ThunderDude\"}\n base_url = \"http://olympus.realpython.org/login\"\n\n # 1\n browser = mechanicalsoup.Browser()\n login_page = browser.get(base_url)\n login_html = login_page.soup\n\n # 2\n form = login_html.select(\"form\")[0]\n form.select(\"input\")[0][\"value\"] = \"zeus\"\n form.select(\"input\")[1][\"value\"] = \"ThunderDude\"\n\n # 3\n profiles_page = browser.submit(form, login_page.url)\n print(profiles_page.soup.title.text)\n\n\ndef example_soup_select():\n browser = mechanicalsoup.Browser()\n page = browser.get(\"http://olympus.realpython.org/dice\")\n tag = page.soup.select(\"#time\")[0]\n result = tag.text\n\n 
print(f\"The result of your dice roll is: {result}\")\n\n\ndef example_with_time():\n browser = mechanicalsoup.Browser()\n\n for i in range(4):\n page = browser.get(\"http://olympus.realpython.org/dice\")\n tag = page.soup.select(\"#result\")[0]\n result = tag.text\n print(f\"The result of your dice roll is: {result}\")\n time.sleep(10)\n","repo_name":"thebinario/python_scraper_poc","sub_path":"poc.py","file_name":"poc.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36027767661","text":"# -*- coding: UTF-8 -*-\nimport requests\nimport sys\nimport json\nimport time\n\nmobile=sys.argv[1]\nfullname=sys.argv[2]\ntitle=sys.argv[3]\naccess_token=sys.argv[4]\n\njobat = time.strftime(\"%Y-%m-%d\",time.localtime())\nnowtime = int(time.time())*1000\n#print jobat\n#print nowtime\n\n'''\n\nstr = []\nmob_name = open(\"mob_name.txt\", 'r')\nlines=mob_name.readlines()\nlist1 = []\n\nfor line in lines:\n\ttemp = line.replace('', '').split(',')\n\tlist1.append(temp)\n\n\nfor data in range(len(list1)):\n\tv = data % 3\n\tif v == 0:\n\t\ta = list1[data][0]\n\telif v == 1:\n\t\tb = list1[data][0]\n\telif v == 2:\n\t\tc = list1[data][0]\nprint list1\nprint a,b,c\n'''\n\ndef admin_add_peo():\n\turl = '' #接口url\n\n\theaders = {\n \t'Accept-Encoding': 'gzip, deflate',\n \t\t'Accept' :\t'application/json, text/plain, */*',\n \t'Content-Type':\t'application/json;charset=UTF-8',\n \t'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36',\n \t'Authorization': 'Bearer '+access_token\n\t}\n\t#post data数据\n\tdata = {\n\t\t\n\t\t\"contractVos\": [{\n\t\t\t\"startDate\": nowtime,\n\t\t\t\"attachments\": []\n\t\t}],\n\t\t\"loginAccount\": mobile,\n\t\t\"mobile\": mobile,\n\t\t\"fullname\": fullname,\n\t\t\"jobTitle\": title,\n\t\t\"joinedAt\": jobat\n\t}\n\n\t#print data\n\treturn requests.request(\"POST\", url, headers=headers, json=data).json()\n\n\nif __name__ == '__main__':\n\tadmin_add_peo()\n\n","repo_name":"zenghuiLi/Manage","sub_path":"admin_addp.py","file_name":"admin_addp.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16171723477","text":"from sys import stdin\ndef main():\n\n f,c,inicio = [int(x) for x in stdin.readline().strip().split()]\n while f:\n movimientos = {'N': (-1, 0), 'E': (0, 1), 'W': (0, -1), 'S': (1, 0)}\n casillas = f*c\n laberinto = []\n visitados = [False]*(casillas)\n secuencia = []\n posx = 0\n posy = inicio-1\n for x in range(f):\n a = stdin.readline().strip()\n laberinto.append(a)\n for x in range(casillas):\n pos_actual = posx * c + posy\n if visitados[pos_actual]:\n momento_del_ciclo = secuencia.index(pos_actual)\n pasos = momento_del_ciclo\n pasos_antes = len(secuencia) - momento_del_ciclo\n print('{} step(s) before a loop of {} step(s)'.format(pasos,pasos_antes))\n break\n \n \n visitados[pos_actual] = True\n secuencia.append(pos_actual)\n movimientox,movimientoy = movimientos[laberinto[posx][posy]]\n new_posx = posx+movimientox\n new_posy = posy+movimientoy\n\n if 0 <= new_posx < f and 0 <= new_posy < c:\n posx, posy = new_posx,new_posy\n else:\n pasos = len(secuencia)\n print('{} step(s) to exit'.format(pasos))\n break\n flag = True\n for x in range(len(visitados)):\n if visitados[x] == False:\n flag = False\n if flag:\n print('{} step(s) before a loop of {} step(s)'.format(0,(f*c)))\n 
break\n \n \n f,c,inicio = [int(x) for x in stdin.readline().strip().split()]\nmain()\n \n","repo_name":"CAndresRa/UvaSolution","sub_path":"10116 - Robot Motion.py","file_name":"10116 - Robot Motion.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1692002416","text":"\"\"\"empty message\n\nRevision ID: 79d7be499fd2\nRevises: 3c541b4e9ac1\nCreate Date: 2021-05-04 22:32:12.175287\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '79d7be499fd2'\ndown_revision = '3c541b4e9ac1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('note', sa.String(length=120), nullable=True))\n op.create_index(op.f('ix_user_note'), 'user', ['note'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_user_note'), table_name='user')\n op.drop_column('user', 'note')\n # ### end Alembic commands ###\n","repo_name":"kjeivers/microblog-tutorial","sub_path":"migrations/versions/79d7be499fd2_.py","file_name":"79d7be499fd2_.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7688531493","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\n\ndef plot3D(drp, s0, s1, mina=0, maxa=65): \n fig, ax = plt.subplots(figsize=(6,6))\n u = np.arange(0, 2*np.pi*(1+1/s1), 2*np.pi/s1)\n a = np.pi/2*mina/90\n b = np.pi/2*maxa/90\n v = np.arange(a, b, (b-a)/(s0+1))\n x = np.outer(np.cos(u),np.cos(v))\n y = np.outer(np.sin(u),np.cos(v))\n ax.pcolormesh(x, y, drp, cmap=plt.cm.jet) \n ax.set_xlim(-1,1)\n ax.set_ylim(-1,1)\n ax.axis('off')\n return fig\n\nclass FixDRPCan():\n def __init__(self, master, ws, host_canvas, master_canvas, master_object):\n self.host_canvas = host_canvas\n self.canvas = master_canvas\n self.master_object = master_object\n\n def set_init_configuration(self):\n self.host_canvas.set_init_configuration()\n self.locator = self.canvas.create_oval(0,0,0,0,outline='')\n\n def on_import_setup(self):\n '''On import, bind print function and pass in relevant data'''\n self.host_canvas.on_import_setup()\n self.fetch_parent_variables()\n self.canvas.bind('', self.print_drp)\n \n def on_close_setup(self):\n self.canvas.unbind('')\n self.set_init_configuration()\n self.host_canvas.on_close_setup()\n del self.data, self.rx, self.ry, self.s0, self.s1, self.sc\n \n def fetch_parent_variables(self):\n self.data = self.master_object.data\n self.rx = self.master_object.rx\n self.ry = self.master_object.ry\n self.s0 = self.master_object.s0\n self.s1 = self.master_object.s1\n self.sc = self.master_object.sc\n\n # -------------------------------------------------------------------------\n def print_drp(self, event):\n '''\n Plots a red circle and shows current DRP.\n '''\n locX, locY = event.x, event.y\n \n x = np.clip(locX/self.sc, 0, self.ry-1).astype('int')\n y = np.clip(locY/self.sc, 0, self.rx-1).astype('int')\n \n # Display locator (red circle)\n self.canvas.delete(self.locator)\n self.locator = self.canvas.create_oval(locX-5, locY-5, locX+5, locY+5, outline='red', width=2,)\n \n # Print DRP in canvas\n drp = self.data[y, x].reshape((self.s0, self.s1)).T\n fig = plot3D(drp, self.s0, 
self.s1)\n plt.close()\n \n # ### OPTIONAL: PRINT DRP IN CONSOLE\n # fig, ax = plt.subplots(figsize=(8,8), dpi=200)\n # ax.imshow(drp.T, cmap=plt.cm.gray)\n # ax.axis('off')\n # plt.show()\n \n self.drpfixim = FigureCanvasAgg(fig)\n s, (width, height) = self.drpfixim.print_to_buffer()\n self.drpfixim = np.frombuffer(s, np.uint8).reshape((height, width, 4))\n \n self.host_canvas.show_rgba(self.drpfixim)\n self.master_object.nbk.select(0)\n","repo_name":"AddMELAB/pydrm_GUI","sub_path":"pydrm/app/canvas_fixDRP.py","file_name":"canvas_fixDRP.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14022117667","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 26 18:19:36 2017\n\n@author: MrTwiggy\n\"\"\"\n\nimport sys\nfrom LoadReplayData import load_all_replays, copy_dataset\n\n\n# --------------------- Main Logic -----------------------\n# Usage: Fetch the replays from a specified folder and generate training data from the games, writing it to disk as validation and training splits.\n# Example: python ./GenerateData.py ./replays/Spraget 4 1000 SpragetFrames ./data\n# Will fetch 1000 replays from ./replays/Spraget and save the frames from the game into file ./data/SpragetFrames.h5\nif __name__ == \"__main__\":\n arg_count = len(sys.argv) - 1\n \n REPLAY_FOLDER = sys.argv[1] if arg_count >= 1 else \"./replays\"\n THREAD_COUNT = int(sys.argv[2]) if arg_count >= 2 else 4\n GAMES_TO_LOAD = int(sys.argv[3]) if arg_count >= 3 else 100\n DATA_FILE_NAME = sys.argv[4] if arg_count >= 4 else \"default-data\"\n DATA_FOLDER = sys.argv[5] if arg_count >= 5 else \"./data\"\n \n temp_data_name = \"{}-temp\".format(DATA_FILE_NAME)\n \n load_all_replays(temp_data_name, REPLAY_FOLDER, GAMES_TO_LOAD, THREAD_COUNT)\n copy_dataset(temp_data_name, DATA_FILE_NAME)","repo_name":"TySayers/generals-io-bot","sub_path":"GenerateData.py","file_name":"GenerateData.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3689408114","text":"# class Tree:\n# def __init__(self, val, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def solve(self, root):\n if root==None:\n return root\n q=[[root,0]]\n chk=[]\n t=1\n while(t):\n l=q.pop(0)\n t-=1\n ptr=l[0]\n if ptr.left:\n q.append([ptr.left,l[1]+1])\n t+=1\n if ptr.right:\n q.append([ptr.right,l[1]+1])\n t+=1\n if ptr.left==None and ptr.right==None:\n chk.append(l[1])\n s=list(set(chk))\n if len(s)==1:\n return True\n return False\n","repo_name":"AvaneeshB/Python-DSA-","sub_path":"LeavesInSameLevel.py","file_name":"LeavesInSameLevel.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3531187697","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 15:37:11 2020\n\n@author: mheinzinger\n\"\"\"\n\nimport seaborn as sn\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom pathlib import Path\nimport time\nimport random\nimport copy\nimport h5py\n\n# The following settings will depend on your setup\n# matplotlib import & config\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg') # GPU is only available via SSH (no display)\nplt.clf() # clear previous figures if already existing\n\n# Device 
configuration\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n# https://discuss.pytorch.org/t/reproducibility-with-all-the-bells-and-whistles/81097\ndef seed_all(seed=42):\n print(\"[ Using Seed : \", seed, \" ]\")\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n return None\n\n\nclass ProtTucker(nn.Module):\n def __init__(self):\n super(ProtTucker, self).__init__()\n\n self.protTucker = nn.Sequential(\n nn.Linear(1024, 256), # 512\n nn.Tanh(),\n nn.Linear(256, 128), # 256\n )\n\n def single_pass(self, X):\n X = X.float()\n return self.protTucker(X)\n\n def forward(self, X):\n anchor = self.single_pass(X[:, 0, :])\n pos = self.single_pass(X[:, 1, :])\n neg = self.single_pass(X[:, 2, :])\n return (anchor, pos, neg)\n\n\nclass CustomDataset(torch.utils.data.Dataset):\n\n def __init__(self, train, datasplitter, n_classes, balanced_sampling=False):\n self.balanced_sampling = balanced_sampling\n self.seq_id, self.embd = zip(\n *[(seq_id, embd) for seq_id, embd in train.items()])\n\n self.id2label, self.label2id = datasplitter.parse_label_mapping_cath(\n set(train.keys()))\n\n # if classes should be sampled evenly (not all training samples are used in every epoch)\n if self.balanced_sampling:\n print(\"Using balanced sampling!\")\n self.unique_labels = self.get_unique_labels()\n self.data_len = len(self.unique_labels)\n else: # if you want to iterate over all training samples\n self.data_len = len(self.seq_id)\n\n self.id2embedding = train\n self.n_classes = n_classes # number of class levels\n\n def __len__(self):\n return self.data_len\n\n def __getitem__(self, index):\n if self.balanced_sampling: # get a CATH class, instead of a trainings sample\n c, a, t, h = self.unique_labels[index] # get CATH class\n anchor_candidates = self.label2id[c][a][t][h] # get samples within this CATH class\n anchor_id = random.choice(anchor_candidates) # randomly pick one of these samples as anchor\n anchor = self.id2embedding[anchor_id] # retrieve embedding for this sample\n anchor_label = self.id2label[anchor_id] # retrieve label for this sample\n else: # get a training sample (over-samples large CATH families according to occurance)\n anchor = self.embd[index] # get embedding of anchor\n anchor_id = self.seq_id[index] # get CATH ID of anchor\n anchor_label = self.id2label[anchor_id] # get CATH label of anchor\n pos, neg, pos_label, neg_label, pos_sim = self.get_pair(\n anchor_id, anchor_label)\n return (anchor, pos, neg, anchor_label, pos_label, neg_label, pos_sim)\n\n def get_unique_labels(self):\n unique_set = set()\n unique_labels = list()\n for _, cath_label in self.id2label.items():\n cath_str = '.'.join([str(cath_hierarchy_lvl)\n for cath_hierarchy_lvl in cath_label])\n if cath_str in unique_set:\n continue\n unique_labels.append(cath_label)\n unique_set.add(cath_str)\n print(\"Number of unique CATH labels in train: {}\".format(len(unique_set)))\n return unique_labels\n\n def get_rnd_label(self, labels, is_pos, existing_label=None):\n n_labels = len(labels)\n # if alternative labels are available, ensure difference between existing and new label\n if n_labels > 1 and existing_label is not None:\n labels = [label for label in labels if label != existing_label]\n n_labels -= 1\n\n rnd_idx = np.random.randint(0, n_labels)\n\n i = iter(labels)\n for _ in range(rnd_idx):\n next(i)\n rnd_label = next(i)\n # do 
not accidentaly draw the same label; instead draw again if necessary\n if existing_label is not None and rnd_label == existing_label:\n if is_pos: # return the label itself for positives\n # Allow positives to have the same class as the anchor (relevant for rare classes)\n return existing_label\n else:\n # if there exists no negative sample for a certain combination of anchor and similarity-level\n return None\n return rnd_label\n\n def get_rnd_candidates(self, anchor_label, similarity_level, is_pos):\n\n # Get CATH classification of anchor sample\n class_n, arch, topo, homo = anchor_label\n\n if similarity_level == 0: # No similarity - different class\n rnd_class = self.get_rnd_label(\n self.label2id.keys(), is_pos, class_n)\n rnd_arch = self.get_rnd_label(\n self.label2id[rnd_class].keys(), is_pos)\n rnd_topo = self.get_rnd_label(\n self.label2id[rnd_class][rnd_arch].keys(), is_pos)\n rnd_homo = self.get_rnd_label(\n self.label2id[rnd_class][rnd_arch][rnd_topo].keys(), is_pos)\n candidates = self.label2id[rnd_class][rnd_arch][rnd_topo][rnd_homo]\n\n elif similarity_level == 1: # Same class but different architecture\n rnd_arch = self.get_rnd_label(\n self.label2id[class_n].keys(), is_pos, arch)\n rnd_topo = self.get_rnd_label(\n self.label2id[class_n][rnd_arch].keys(), is_pos)\n rnd_homo = self.get_rnd_label(\n self.label2id[class_n][rnd_arch][rnd_topo].keys(), is_pos)\n candidates = self.label2id[class_n][rnd_arch][rnd_topo][rnd_homo]\n\n elif similarity_level == 2: # Same Class & Architecture but different topo\n rnd_topo = self.get_rnd_label(\n self.label2id[class_n][arch].keys(), is_pos, topo)\n rnd_homo = self.get_rnd_label(\n self.label2id[class_n][arch][rnd_topo].keys(), is_pos)\n candidates = self.label2id[class_n][arch][rnd_topo][rnd_homo]\n\n elif similarity_level == 3: # Same Class & Architecture & topo but different homo\n rnd_homo = self.get_rnd_label(\n self.label2id[class_n][arch][topo].keys(), is_pos, homo)\n candidates = self.label2id[class_n][arch][topo][rnd_homo]\n\n # Highest similarity - different homology class (only relevent for positives)\n elif similarity_level == 4:\n candidates = self.label2id[class_n][arch][topo][homo]\n\n else:\n raise NotImplementedError\n\n return candidates\n\n def check_triplet(self, anchor_label, pos_label, neg_label, neg_hardness, pos_hardness):\n assert neg_hardness < pos_hardness, print(\n \"Neg sample more similar than pos sample\")\n\n for i in range(0, pos_hardness):\n assert anchor_label[i] == pos_label[i], print(\"Pos label not overlapping:\\n\" +\n \"Diff: {}\\nanchor:{}\\npos:{}\\nneg:{}\".format(pos_hardness, anchor_label, pos_label, neg_label))\n for j in range(0, neg_hardness):\n assert anchor_label[j] == neg_label[j], print(\"Neg label not overlapping:\\n\" +\n \"Diff: {}\\nanchor:{}\\npos:{}\\nneg:{}\".format(neg_hardness, anchor_label, pos_label, neg_label))\n assert anchor_label[neg_hardness] != neg_label[neg_hardness], print(\n \"Neg label not different from anchor\")\n return None\n\n def get_pair(self, anchor_id, anchor_label, hardness_level=None, verbose=False):\n pos, neg = None, None\n pos_label, neg_label = None, None\n\n while pos is None or neg is None:\n neg_similarity = np.random.randint(self.n_classes)\n pos_similarity = neg_similarity + 1\n try:\n neg_candidates = self.get_rnd_candidates(\n anchor_label, neg_similarity, is_pos=False) # get set of negative candidates\n neg_id = random.choice(neg_candidates) # randomly pick one of the neg. 
candidates\n neg_label = self.id2label[neg_id] # get label of randomly picked neg.\n neg = self.id2embedding[neg_id] # get embedding of randomly picked neg.\n \n # repeat the same for the positive sample\n pos_candidates = self.get_rnd_candidates(\n anchor_label, pos_similarity, is_pos=True)\n pos_id = random.choice(pos_candidates)\n \n # ensure that we do not randomly pick the same protein as anchor and positive\n if pos_id == anchor_id and len(pos_candidates) > 1:\n while pos_id == anchor_id: # re-draw from the pos. candidates if possible\n pos_id = random.choice(pos_candidates)\n # if there is only one protein in a superfamily (anchor==positive without other candidates), re-start picking process\n elif pos_id == anchor_id and len(pos_candidates) == 1:\n continue\n\n pos = self.id2embedding[pos_id]\n pos_label = self.id2label[pos_id]\n # if we successfully picked anchor, positive and negative candidates, do same sanity checks\n if pos_label is not None and neg_label is not None:\n self.check_triplet(anchor_label, pos_label,\n neg_label, neg_similarity, pos_similarity)\n else: # if no triplet could be formed for a given combination of similarities/classes\n continue\n\n except NotImplementedError: # if you try to create triplets for a class level that is not yet implemented in get_rnd_candidates\n print(anchor_id, anchor_label)\n raise NotImplementedError\n\n except KeyError:\n # if get_rnd_label returned None because no negative could be found\n # for a certain combination of anchor protein and similarity-lvl\n # re-start picking process\n continue\n\n if verbose:\n print('#### Example ####')\n print('Anc ({}) label: {}'.format(anchor_id, anchor_label))\n print('Pos ({}) label: {}'.format(pos_id, self.id2label[pos_id]))\n print('Neg ({}) label: {}'.format(neg_id, self.id2label[neg_id]))\n print('#### Example ####')\n\n return pos, neg, pos_label, neg_label, pos_similarity\n\n def get_example(self):\n example_id = next(iter(self.id2embedding.keys()))\n example_label = self.id2label[example_id]\n self.get_pair(example_id, example_label, verbose=True)\n return None\n\n\nclass DataSplitter():\n def __init__(self, embedding_p, verbose=True):\n self.verbose = verbose\n self.data_dir = embedding_p.parent\n self.id2embedding = self.get_precomputed_embeddings(embedding_p)\n\n if verbose:\n print('Loaded embeddings for n_proteins: {}'.format(\n len(self.id2embedding)))\n\n self.cath_label_path = self.data_dir / 'cath-domain-list.txt'\n self.id2label, self.label2id = self.parse_label_mapping_cath(\n set(self.id2embedding.keys()))\n\n def get_id2embedding(self):\n return self.id2embedding\n\n def parse_label_mapping_cath(self, id_subset):\n id2label = dict()\n label2id = dict()\n with open(self.cath_label_path, 'r') as f:\n for n_domains, line in enumerate(f):\n\n # skip header lines\n if line.startswith(\"#\"):\n continue\n\n data = line.split()\n identifier = data[0]\n # skip annotations of proteins without embedding (not part of data set)\n if identifier not in id_subset:\n continue\n\n cath_class = int(data[1])\n cath_arch = int(data[2])\n cath_topo = int(data[3])\n cath_homo = int(data[4])\n\n if cath_class not in label2id:\n label2id[cath_class] = dict()\n if cath_arch not in label2id[cath_class]:\n label2id[cath_class][cath_arch] = dict()\n if cath_topo not in label2id[cath_class][cath_arch]:\n label2id[cath_class][cath_arch][cath_topo] = dict()\n if cath_homo not in label2id[cath_class][cath_arch][cath_topo]:\n label2id[cath_class][cath_arch][cath_topo][cath_homo] = list()\n\n 
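# record the sample in both directions: id -> CATH label, and nested CATH hierarchy -> member ids\n                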
id2label[identifier] = [cath_class,\n cath_arch, cath_topo, cath_homo]\n label2id[cath_class][cath_arch][cath_topo][cath_homo].append(\n identifier)\n\n if self.verbose:\n print('Finished parsing n_domains: {}'.format(n_domains))\n print(\"Total length of id2label: {}\".format(len(id2label)))\n return id2label, label2id\n\n def read_cath_ids(self, path):\n ids = set()\n id_list = list()\n seq_test = dict()\n\n with open(path, 'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith('>'):\n line = line.replace(\">\", \"\")\n if '|' in line:\n seq_id = line.split('|')[2]\n else:\n seq_id = line\n if seq_id in ids: # some weird double entries in CATH test set..\n continue\n ids.add(seq_id)\n id_list.append(seq_id)\n seq_test[seq_id] = list()\n else:\n seq_test[seq_id].append(line)\n\n # some identical sequences need to be removed\n seq_set = {''.join(seq): seq_id for seq_id, seq in seq_test.items()}\n id_list = [seq_id for seq, seq_id in seq_set.items()]\n\n # assert that no identical seqs are in the sets\n assert len(seq_set) == len(id_list)\n if self.verbose:\n print('Example CATH ID: {}'.format(seq_id))\n print('-- Loaded {} proteins from {}'.format(len(id_list), path))\n return id_list\n\n def get_precomputed_embeddings(self, embedding_p):\n # load pre-computed embeddings in .h5 file format\n h5_f = h5py.File(embedding_p, 'r')\n try:\n dataset = {seq_id.split(\"|\")[2].split(\"_\")[0]: np.expand_dims(np.array(next(iter(embd.items()))[1]), axis=0)\n for seq_id, embd in h5_f.items()}\n except AttributeError:\n dataset = {seq_id.split(\"|\")[2].split(\"_\")[0]: np.expand_dims(np.array(embd), axis=0)\n for seq_id, embd in h5_f.items()}\n\n print(\"Example: {}\".format(next(iter(dataset.keys()))))\n return dataset\n\n def get_embeddings(self, fasta_path):\n cath_ids = self.read_cath_ids(fasta_path)\n embeddings = dict()\n for cath_id in cath_ids:\n try:\n embd = self.id2embedding[cath_id]\n except KeyError:\n print('No embedding found for: {}'.format(cath_id))\n continue\n embeddings[cath_id] = torch.tensor(embd).to(device)\n return embeddings\n\n def get_predef_splits(self, p_train=None, p_test=None):\n\n if p_train is None or p_test is None:\n p_train = self.data_dir / \"train74k.fasta\"\n p_val = self.data_dir / \"val200.fasta\"\n p_valLookup20 = self.data_dir / \"train74k.fasta\"\n\n val = self.get_embeddings(p_val)\n valLookup20 = self.get_embeddings(p_valLookup20)\n train = self.get_embeddings(p_train)\n\n if self.verbose:\n print('##########')\n print('Finished splitting data!')\n print('Train set size: {}'.format(len(train)))\n print('Val set size: {}'.format(len(val)))\n print('ValLookup20 size: {}'.format(len(valLookup20)))\n print('##########')\n return train, val, valLookup20\n\n\nclass MyCollator(object):\n def __call__(self, batch):\n X = list()\n Y = list()\n sim = list()\n for (anchor, pos, neg, anchor_label, pos_label, neg_label, pos_sim) in batch:\n x = torch.cat([anchor, pos, neg], dim=0)\n X.append(x.view(1, 3, -1))\n Y.append(self.get_label_vector(anchor_label, pos_label, neg_label))\n sim.append(pos_sim)\n return (torch.cat(X, dim=0), torch.cat(Y, dim=0), torch.tensor(sim))\n\n def get_label_vector(self, anchor_label, pos_label, neg_label):\n anc = torch.tensor(anchor_label).view(1, -1)\n pos = torch.tensor(pos_label).view(1, -1)\n neg = torch.tensor(neg_label).view(1, -1)\n y = torch.cat([anc, pos, neg], dim=0)\n return y.view(1, 3, -1)\n\n\nclass plotter():\n def __init__(self, log_dir):\n self.init_plotting()\n self.log_dir = log_dir\n\n def 
init_plotting(self):\n params = {\n 'axes.labelsize': 13, # increase font size for axis labels\n }\n plt.rc(params) # apply parameters\n return plt, sn\n\n def merge_pdfs(self, pdf1_path, pdf2_path, output_path):\n # Merge two PDFs\n from PyPDF2 import PdfFileMerger\n pdfs = [pdf1_path, pdf2_path]\n\n merger = PdfFileMerger()\n\n for pdf in pdfs:\n merger.append(pdf)\n\n merger.write(str(output_path))\n merger.close()\n return None\n\n def plot_minMaxMean(self, train_minMax, file_name='minMaxMean.pdf'):\n plt, _ = self.init_plotting()\n\n # Plot first three samples in Batch in one figure\n fig, axes = plt.subplots(1, 1)\n\n x = np.asarray(train_minMax['min'])\n y = np.asarray(train_minMax['max'])\n z = np.asarray(train_minMax['mean'])\n L = np.arange(1, x.size+1)\n\n axes.plot(L, x, 'g', label='Min')\n axes.plot(L, y, 'r', label='Max')\n axes.plot(L, z, 'b', label='Mean')\n\n axes.set_xlabel('Steps/Batches')\n axes.set_ylabel('min/max/mean')\n\n _ = plt.legend()\n plt.title('Min/Max/Mean development')\n\n pdf_path = self.log_dir / file_name\n fig.savefig(str(pdf_path), format='pdf')\n\n plt.close(fig) # close figure handle\n return None\n\n def plot_distances(self, dist_pos, dist_neg, file_name='distances.pdf'):\n plt, _ = self.init_plotting()\n\n # Plot first three samples in Batch in one figure\n fig, axes = plt.subplots(1, 1)\n\n x = np.asarray(dist_pos)\n y = np.asarray(dist_neg)\n L = np.arange(1, x.size+1)\n\n axes.plot(L, x, 'g', label='Dist. Pos')\n axes.plot(L, y, 'r', label='Dist. Neg')\n\n axes.set_xlabel('Steps/Batches')\n axes.set_ylabel('Distances')\n\n _ = plt.legend()\n plt.title('Distance development')\n\n pdf_path = self.log_dir / file_name\n fig.savefig(str(pdf_path), format='pdf')\n\n plt.close(fig) # close figure handle\n return None\n\n def plot_acc(self, acc, baseline, diff_classes=4, file_name='acc.pdf'):\n\n plt, _ = self.init_plotting()\n\n\n fig, axes = plt.subplots(1, 1)\n\n colors = ['r', 'b', 'g', 'm']\n for diff_class in range(diff_classes):\n x = np.asarray(acc[diff_class])\n max_acc_idx = np.argmax(x)\n max_acc = x[max_acc_idx]\n L = np.arange(1, x.size+1)\n b = np.ones(x.size) * baseline[diff_class]\n axes.plot(L, x, colors[diff_class], label='LvL.: {} # {:.3f} in epoch {}'.format(\n diff_class, max_acc, max_acc_idx))\n axes.plot(L, b, colors[diff_class]+'-.')\n\n axes.set_xlabel('Steps/Batches')\n axes.set_ylabel('Accuracy')\n\n _ = plt.legend()\n plt.title(file_name.replace('.pdf', ''))\n\n pdf_path = self.log_dir / file_name\n fig.savefig(str(pdf_path), format='pdf')\n\n plt.close(fig) # close figure handle\n return None\n\n def plot_loss(self, train, test=None, file_name='loss.pdf'):\n test = train if test is None else test\n plt, _ = self.init_plotting()\n fig, axes = plt.subplots(1, 1)\n\n x = np.asarray(train)\n y = np.asarray(test)\n L = np.arange(1, x.size+1)\n\n axes.plot(L, x, 'g', label='Train')\n axes.plot(L, y, 'r--', label='Test')\n\n axes.set_xlabel('Steps/Batches')\n axes.set_ylabel('Loss')\n\n _ = plt.legend()\n plt.title(file_name.replace('loss.pdf', ''))\n\n pdf_path = self.log_dir / file_name\n fig.savefig(str(pdf_path), format='pdf')\n\n plt.close(fig) # close figure handle\n return None\n\n\nclass Eval():\n def __init__(self, lookup, test, datasplitter, n_classes, name='cath'):\n self.lookup, self.lookupIdx2label = self.preproc(lookup)\n self.test, self.testIdx2label = self.preproc(test)\n self.id2label, self.label2id = datasplitter.parse_label_mapping_cath(\n # use only keys from the given lookup set\n set(lookup.keys()) | 
set(test.keys()),\n )\n self.name = name\n #self.log = self.init_log()\n self.n_classes = n_classes\n self.accs = self.init_log()\n self.errs = self.init_log()\n self.distance = torch.nn.PairwiseDistance(p=2)\n\n def get_test_set(self):\n return self.test\n\n def get_lookup_set(self):\n return self.lookup\n\n def get_acc(self):\n return self.acc\n\n def get_err(self):\n return self.err\n\n def init_log(self):\n log = dict()\n for i in range(self.n_classes):\n log[i] = list()\n return log\n\n def init_confmats(self):\n confmats = list()\n for i in range(self.n_classes):\n confmat = np.zeros((1, 2, 2))\n confmats.append(confmat)\n confmats = np.concatenate(confmats, axis=0)\n return confmats\n\n def preproc(self, data):\n idx2label = dict()\n dataset = list()\n for idx, (seq_id, embd) in enumerate(data.items()):\n idx2label[idx] = seq_id\n dataset.append(embd)\n dataset = torch.cat(dataset, dim=0)\n return dataset, idx2label\n\n def add_sample(self, y, yhat, confmats):\n wrong = False\n\n for class_lvl, true_class in enumerate(y): # for each prediction\n # skip cases where the test protein did not have had any nn in lookupDB\n # --> It is by defnition not possible that those could be predicted correctly\n if np.isnan(true_class):\n continue\n if not wrong and true_class == yhat[class_lvl]:\n correct = 1 # count only those in\n else: # if there is a wrong prediction on this level, lower-lvls are wrong by definition\n correct = 0\n wrong = True\n confmats[class_lvl, correct, correct] += 1\n return confmats\n\n def pdist(self, sample_1, sample_2, norm=2):\n return torch.cdist(sample_1.unsqueeze(dim=0), sample_2.unsqueeze(dim=0), p=norm).squeeze(dim=0)\n\n def mergeTopK(self, yhats):\n yhats = np.vstack(yhats)\n\n final_yhat = [None, None, None, None]\n for i in range(self.n_classes):\n (values, counts) = np.unique(yhats[:, i], return_counts=True)\n idxs = np.argmax(counts)\n nn_class = values[idxs]\n final_yhat[i] = nn_class\n mask = yhats[:, i] == nn_class\n yhats = yhats[mask, :]\n\n return final_yhat\n\n def mask_singletons(self, y):\n # Mask cases where the only annotated instance is the test protein\n # Those cases can not be predicted correctly without considering self-hits\n c, a, t, h = y\n if len(self.label2id[c][a][t][h]) == 1: # if h-lvl has only the test prot\n y[-1] = np.nan\n if len(self.label2id[c][a][t]) == 1: # if t-lvl \"\n y[-2] = np.nan\n if len(self.label2id[c][a]) == 1: # if a-lvl \"\n y[-3] = np.nan\n if len(self.label2id[c]) == 1: # if c-lvl \"\n y[-4] = np.nan\n return y\n\n def compute_err(self, confmat, n_bootstrap=10000):\n n_total = int(confmat.sum()) # total number of predictions\n n_wrong, n_correct = int(confmat[0, 0]), int(confmat[1, 1])\n preds = [0 for _ in range(n_wrong)] + [1 for _ in range(n_correct)]\n subset_accs = list()\n for _ in range(n_bootstrap):\n rnd_subset = random.choices(preds, k=n_total)\n subset_accs.append(sum(rnd_subset)/n_total)\n return np.std(np.array(subset_accs), axis=0, ddof=1)\n\n def evaluate(self, lookup, queries, n_nearest=1, update=True):\n p_dist = self.pdist(lookup.float(), queries.float())\n\n _, nn_idxs = torch.topk(p_dist, n_nearest, largest=False, dim=0)\n\n confmats = self.init_confmats()\n n_test = len(self.testIdx2label)\n for test_idx in range(n_test): # for all test proteins\n y_id = self.testIdx2label[test_idx] # get id of test protein\n # get annotation of test (groundtruth)\n y = copy.deepcopy(self.id2label[y_id])\n y = self.mask_singletons(y)\n\n nn_idx = nn_idxs[:, test_idx]\n yhats = list()\n for nn_i in 
nn_idx:\n # index of nearest neighbour (nn) in train set\n nn_i = int(toCPU(nn_i))\n # get id of nn (infer annotation)\n yhat_id = self.lookupIdx2label[nn_i]\n # get annotation of nn (groundtruth)\n yhat = self.id2label[yhat_id]\n yhat = np.asarray(yhat)\n yhats.append(yhat)\n\n if n_nearest == 1:\n assert len(yhats) == 1, print(\n \"More than one NN retrieved, though, n_nearest=1!\")\n yhat = yhats[0]\n else:\n yhat = self.mergeTopK(yhats)\n confmats = self.add_sample(y, yhat, confmats)\n\n if update: # for constantly monitoring test performance\n for i in range(self.n_classes):\n acc = confmats[i, 1, 1] / confmats[i].sum()\n err = self.compute_err(confmats[i])\n self.accs[i].append(acc)\n self.errs[i].append(err)\n return self.accs, self.errs\n\n else: # to get baseline at the beginning\n accs, errs = list(), list()\n # get accuracy per difficulty level\n for i in range(self.n_classes):\n acc = confmats[i, 1, 1] / confmats[i].sum()\n err = self.compute_err(confmats[i])\n accs.append(acc)\n errs.append(err)\n print(\"Samples for class {}: {}\".format(\n i, sum(confmats[i, :, :])))\n return accs, errs\n\n\nclass TripletLoss(object):\n \"\"\"Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid). \n Related Triplet Loss theory can be found in paper 'In Defense of the Triplet \n Loss for Person Re-Identification'.\"\"\"\n\n def __init__(self, margin=None, exclude_easy=False, batch_hard=True):\n self.margin = margin\n self.distance = torch.nn.PairwiseDistance(p=2)\n self.exclude_easy = exclude_easy\n self.reduction = 'none' if self.exclude_easy else 'mean'\n self.batch_hard = batch_hard\n self.sample = False\n self.softmax = nn.Softmax(dim=0)\n self.min = -10**10\n if margin is not None:\n self.ranking_loss = nn.MarginRankingLoss(\n margin=margin, reduction=self.reduction)\n else:\n self.ranking_loss = nn.SoftMarginLoss(reduction=self.reduction)\n\n def __call__(self, anchor, pos, neg, Y, monitor):\n if self.batch_hard:\n dist_ap, dist_an = self.get_batch_hard(anchor, pos, neg, Y)\n else:\n dist_ap = self.distance(anchor, pos)\n dist_an = self.distance(anchor, neg)\n\n y = Variable(dist_an.data.new().resize_as_(dist_an.data).fill_(1))\n if self.margin is not None:\n loss = self.ranking_loss(dist_an, dist_ap, y)\n else:\n loss = self.ranking_loss(dist_an - dist_ap, y)\n\n if self.exclude_easy:\n loss = loss.sum() / (loss < 0).sum()\n\n embeddings = torch.cat((anchor, pos, neg))\n monitor['pos'].append(toCPU(dist_ap.mean()))\n monitor['neg'].append(toCPU(dist_an.mean()))\n\n monitor['min'].append(toCPU(embeddings.min(dim=1)[0].mean()))\n monitor['max'].append(toCPU(embeddings.max(dim=1)[0].mean()))\n monitor['mean'].append(toCPU(embeddings.mean(dim=1).mean()))\n\n monitor['loss'].append(toCPU(loss))\n monitor['norm'].append(toCPU(torch.norm(embeddings, p='fro')))\n\n return loss\n\n # https://gist.github.com/rwightman/fff86a015efddcba8b3c8008167ea705\n def get_hard_triplets(self, pdist, y, prev_mask_pos):\n n = y.size()[0]\n mask_pos = y.expand(n, n).eq(y.expand(n, n).t()).to(device)\n\n mask_pos = mask_pos if prev_mask_pos is None else prev_mask_pos * mask_pos\n\n # every protein that is not a positive is automatically a negative for this lvl\n mask_neg = ~mask_pos\n mask_pos[torch.eye(n).bool().cuda()] = 0 # mask self-interactions\n mask_neg[torch.eye(n).bool().cuda()] = 0\n\n if self.sample:\n # weighted sample pos and negative to avoid outliers causing collapse\n posw = (pdist + 1e-12) * mask_pos.float()\n posw[posw == 0] = self.min\n posw = self.softmax(posw)\n posi = 
torch.multinomial(posw, 1)\n\n dist_ap = pdist.gather(0, posi.view(1, -1))\n # There is likely a much better way of sampling negatives in proportion their difficulty, based on distance\n # this was a quick hack that ended up working better for some datasets than hard negative\n negw = (1 / (pdist + 1e-12)) * mask_neg.float()\n negw[posw == 0] = self.min\n negw = self.softmax(posw)\n negi = torch.multinomial(negw, 1)\n dist_an = pdist.gather(0, negi.view(1, -1))\n else:\n ninf = torch.ones_like(pdist) * float('-inf')\n dist_ap = torch.max(pdist * mask_pos.float(), dim=1)[0]\n nindex = torch.max(torch.where(mask_neg, -pdist, ninf), dim=1)[1]\n dist_an = pdist.gather(0, nindex.unsqueeze(0)).view(-1)\n\n return dist_ap, dist_an, mask_pos\n\n def get_batch_hard(self, anchor, pos, neg, Y):\n Y = torch.cat([Y[:, 0, :], Y[:, 1, :], Y[:, 2, :]], dim=0)\n X = torch.cat([anchor, pos, neg], dim=0)\n pdist = self.pdist(X)\n\n dist_ap, dist_an = list(), list()\n mask_pos = None\n\n for i in range(4):\n y = Y[:, i]\n dist_pos, dist_neg, mask_pos = self.get_hard_triplets(\n pdist, y, mask_pos)\n dist_ap.append(dist_pos.view(-1))\n dist_an.append(dist_neg.view(-1))\n dist_ap = torch.cat(dist_ap)\n dist_an = torch.cat(dist_an)\n return dist_ap, dist_an\n\n def pdist(self, v):\n dist = torch.norm(v[:, None] - v, dim=2, p=2)\n return dist\n\n\nclass Saver():\n def __init__(self, experiment_dir):\n self.experiment_dir = experiment_dir\n self.checkpoint_p = experiment_dir / 'checkpoint.pt'\n self.best_performance = 0\n self.num_classes = 4\n self.epsilon = 1e-3\n\n def load_checkpoint(self):\n state = torch.load(self.checkpoint_p)\n model = ProtTucker().to(device)\n model.load_state_dict(state['state_dict'])\n print('Loaded model from epch: {:.1f} with avg. acc: {:.3f}'.format(\n state['epoch'], self.best_avg_acc))\n return model, state['epoch']\n\n def save_checkpoint(self, model, epoch, optimizer):\n state = {\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, self.checkpoint_p)\n return None\n\n def check_performance(self, acc, model, epoch, optimizer):\n if isinstance(acc, dict): # if a list of accuracies is passed\n new_performance = acc[3][-1]\n else: # if a single Silhouette score is passed\n new_performance = acc\n if new_performance > self.best_performance + self.epsilon:\n self.save_checkpoint(model, epoch, optimizer)\n self.best_performance = new_performance\n print('New best performance found: {:.3f}!'.format(\n self.best_performance))\n return self.best_performance\n return None\n\n\ndef init_monitor():\n monitor = dict()\n\n monitor['loss'] = list()\n monitor['norm'] = list()\n\n monitor['pos'] = list()\n monitor['neg'] = list()\n\n monitor['min'] = list()\n monitor['max'] = list()\n monitor['mean'] = list()\n return monitor\n\n\n# move torch/GPU tensor to numpy/CPU\ndef toCPU(data):\n return data.cpu().detach().numpy()\n\n\n# count number of free parameters in the network\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\n# Create dataloaders with custom collate function\ndef dataloader(customdata, batch_size):\n my_collator = MyCollator()\n return torch.utils.data.DataLoader(dataset=customdata,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=my_collator,\n )\n\n\n\n# get baseline performance (no ProtTucker but raw pLM embeddings)\ndef get_baseline(test):\n test_set = test.get_test_set()\n train_set = test.get_lookup_set()\n acc, err = 
test.evaluate(train_set, test_set, update=False)\n print(('BASELINE\\nACC-C: {:.2f} +/- {:.2f}\\nACC-A: {:.2f} +/- {:.2f}\\n' +\n 'ACC-T: {:.2f} +/- {:.2f}\\nACC-H: {:.2f} +/- {:.2f}\\nAvg. Acc: {:.2f} +/- {:.2f}').format(\n acc[0], err[0], acc[1], err[1], acc[2], err[2], acc[3], err[3],\n (acc[0] + acc[1] + acc[2] + acc[3]) /\n 4, (err[0] + err[1] + err[2] + err[3])/4,\n ))\n return acc, err\n\n\n\n# test performance during training on validation set (used also for early stopping)\ndef testing(model, test):\n model.eval()\n with torch.no_grad(): # evaluate current performance (no grads)\n test_emb = test.get_test_set()\n lookup_emb = test.get_lookup_set()\n test_tucker = model.single_pass(test_emb)\n lookup_tucker = model.single_pass(lookup_emb)\n acc, err = test.evaluate(lookup_tucker, test_tucker)\n model.train()\n return acc, err\n\n\ndef main():\n # measure training time\n start_overall = time.time()\n # set random seeds\n SEED = 42\n seed_all(SEED)\n\n # set up directory structure\n root = Path.cwd()\n data_dir = root / 'data' # create a directory for logging your experiments\n log_dir = root / 'log' / 'your_log_directory'\n embedding_p = data_dir / \"ProtTucker\" /\"prott5_cath_S100.h5\" # path to your pre-computed embeddings. IDs have to align with label IDs\n print(\"Loading dataset from: {}\".format(embedding_p))\n\n # give your experiment a meaningful name here\n experiment_name = \"your_experiment\"\n\n experiment_dir = log_dir / experiment_name\n if not experiment_dir.is_dir():\n print(\"Creating new log-directory: {}\".format(experiment_dir))\n experiment_dir.mkdir(parents=True)\n\n # Hyperparameters\n learning_rate = 1e-3\n batch_size = 256 # the number of actual samples per batch might be higher due to batch-hard sampling (see paper for more details)\n num_epochs = 200 # will stop earlier if early stopping is triggered\n n_classes = 4 # number of class-lvls; makes it easier to adjust for other problems\n n_bad = 0 # counter for number of epochs that did not improve (counter for early stopping)\n n_thresh = 20 # threshold for number of epochs that did not improve (threshold for early stopping)\n batch_hard = True # whether to activate batch_hard sampling (recommneded)\n exclude_easy = False # whether to exclude trivial samples (did not improve performa)\n margin = None # set this to a float to activate threshold-dependent loss functions (see TripletLoss)\n\n # initialize plotting class (used to monitor loss etc during training)\n pltr = plotter(experiment_dir)\n\n # Prepare datasets\n datasplitter = DataSplitter(embedding_p)\n train_splits, val, val_lookup20 = datasplitter.get_predef_splits()\n\n val20 = Eval(val_lookup20, val, datasplitter, n_classes)\n\n train = CustomDataset(train_splits, datasplitter, n_classes)\n\n train.get_example()\n train.get_example()\n train.get_example()\n train = dataloader(train, batch_size)\n\n model = ProtTucker().to(device)\n criterion = TripletLoss(exclude_easy=exclude_easy,\n batch_hard=batch_hard, margin=margin)\n\n optimizer = torch.optim.Adam(\n model.parameters(), lr=learning_rate, amsgrad=True)\n\n saver = Saver(experiment_dir)\n saver.save_checkpoint(model, 0, optimizer)\n baseline_acc, baseline_err = get_baseline(val20)\n\n print('###### Training parameters ######')\n print('Experiment name: {}'.format(experiment_name))\n print('LR: {}, BS: {}, free Paras.: {}, n_epochs: {}'.format(\n learning_rate, batch_size, count_parameters(model), num_epochs))\n print('#############################\\n')\n print('Start training now!')\n\n 
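# optional sketch: to resume a previous run instead of training from scratch, one could call\n    # model, start_epoch = saver.load_checkpoint()\n    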
monitor = init_monitor()\n for epoch in range(num_epochs): # for each epoch\n\n # =================== testing =====================\n start = time.time()\n acc, err = testing(model, val20) # evaluate using the validation\n test_time = time.time() - start\n new_best = saver.check_performance(\n acc, model, epoch, optimizer) # early stopping class\n\n if new_best is None: # if the new accuracy was worse than a previous one\n n_bad += 1\n if n_bad >= n_thresh: # if more than n_bad consecutive epochs were worse, break training\n break\n else: # if the new accuracy is larger than the previous best one by epsilon, reset counter\n n_bad = 0\n\n # =================== training =====================\n # monitor epoch-wise performance\n epoch_monitor = init_monitor()\n start = time.time()\n for train_idx, (X, Y, _) in enumerate(train): # for each batch in the training set\n X = X.to(device)\n Y = Y.to(device)\n anchor, pos, neg = model(X)\n loss = criterion(anchor, pos, neg, Y, epoch_monitor)\n\n # =================== backward ====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_time = time.time() - start\n\n # monitor various metrics during training\n monitor['loss'].append(\n sum(epoch_monitor['loss']) / len(epoch_monitor['loss']))\n monitor['norm'].append(\n sum(epoch_monitor['norm']) / len(epoch_monitor['norm']))\n monitor['pos'].append(sum(epoch_monitor['pos']) /\n len(epoch_monitor['pos']))\n monitor['neg'].append(sum(epoch_monitor['neg']) /\n len(epoch_monitor['neg']))\n monitor['min'].append(sum(epoch_monitor['min']) /\n len(epoch_monitor['min']))\n monitor['max'].append(sum(epoch_monitor['max']) /\n len(epoch_monitor['max']))\n monitor['mean'].append(\n sum(epoch_monitor['mean']) / len(epoch_monitor['mean']))\n\n train_time = time.time() - start\n\n # ===================log========================\n if epoch % 5 == 0 or epoch == num_epochs-1: # draw plots only every fifth epoch\n pltr.plot_acc(acc, baseline_acc)\n pltr.plot_distances(monitor['pos'], monitor['neg'])\n pltr.plot_loss(monitor['loss'], file_name='loss.pdf')\n pltr.plot_loss(monitor['norm'], file_name='norm.pdf')\n pltr.plot_minMaxMean(monitor)\n\n # Always print training progress to console\n print(('epoch [{}/{}], train loss: {:.3f}, train-time: {:.1f}[s], test-time: {:.1f}[s], ' +\n 'ACC-C: {:.2f}, ACC-A: {:.2f}, ACC-T: {:.2f}, ACC-H: {:.2f} ## Avg. 
Acc: {:.2f}').format(\n epoch + 1, num_epochs,\n monitor['loss'][-1],\n train_time, test_time,\n acc[0][-1], acc[1][-1], acc[2][-1], acc[3][-1],\n (acc[0][-1] + acc[1][-1] + acc[2][-1] + acc[3][-1])/4\n ))\n\n end_overall = time.time()\n print(end_overall-start_overall)\n print(\"Total training time: {:.1f}[m]\".format(\n (end_overall-start_overall)/60))\n return None\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Rostlab/EAT","sub_path":"train_prottucker.py","file_name":"train_prottucker.py","file_ext":"py","file_size_in_byte":41731,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"21"} +{"seq_id":"9221575861","text":"'''\r\nCreated on Jun 6, 2016\r\n\r\n@author: castonkr\r\n\r\n''' \r\nimport pygame, math, random\r\nfrom pygame.constants import K_DOWN, K_UP, K_RIGHT, K_LEFT, K_SPACE, K_q, \\\r\n K_RETURN, MOUSEBUTTONDOWN\r\nfrom pygame.draw import circle\r\nfrom shutil import which\r\nfrom pygame import sprite\r\nfrom math import *\r\n \r\n# Define some colors\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nBLUE = (0, 0, 255)\r\nGREEN = (0, 255, 0)\r\nRED = (255, 0, 0)\r\nPURPLE = (255, 51, 204)\r\nORANGE = (255, 153, 51)\r\n\r\n# Distance Formula\r\ndef distance(point1, point2):\r\n return math.sqrt((point1[1] - point2[1]) ** 2 + (point1[0] - point2[0]) ** 2)\r\n \r\npygame.init()\r\n\r\n# Add Sounds\r\npygame.mixer.init()\r\nsong = pygame.mixer.Sound(\"bounce.wav\")\r\nweapon_sound = pygame.mixer.Sound(\"explosion.wav\")\r\ndamage = pygame.mixer.Sound(\"damage.wav\")\r\ntheme = pygame.mixer.Sound(\"theme.wav\")\r\n\r\ntheme.play()\r\n\r\n# Set the width and height of the screen [width, height]\r\nWIDTH = 700\r\nHEIGHT = 500\r\nsize = (WIDTH, HEIGHT)\r\nscreen = pygame.display.set_mode(size)\r\n \r\npygame.display.set_caption(\"Ball Game\")\r\n\r\n# Start Menu\r\nball = pygame.image.load(\"ball.png\")\r\ntitle_font = pygame.font.SysFont('Calibri', 75)\r\ntitle_text = title_font.render('Play Ball Game', True, WHITE)\r\nother_font = pygame.font.SysFont('Calibri', 25, bold=True)\r\nstart_text = other_font.render(\"Start Game\", True, WHITE)\r\nscreen.blit(ball, [315, 300])\r\nscreen.blit(title_text, [125, 125])\r\nscreen.blit(start_text, [300, 325])\r\npygame.display.flip()\r\nstart_game = True\r\nmy_clock = pygame.time.Clock()\r\nwhile start_game:\r\n events = pygame.event.get()\r\n for event in events:\r\n if event.type == pygame.QUIT:\r\n exit()\r\n if event.type == MOUSEBUTTONDOWN:\r\n a = pygame.mouse.get_pos() \r\n if distance((355, 340), a) < 40:\r\n start_game = False\r\n if start_game is True:\r\n my_clock = 0 \r\n \r\n# Loop until the user clicks the close button.\r\n# Used to manage how fast the screen updates\r\nclock = pygame.time.Clock()\r\nall_items = pygame.sprite.Group()\r\ncircLs = pygame.sprite.Group()\r\n\r\n\r\n# Constructs the balls\r\nclass Circle(pygame.sprite.Sprite):\r\n def __init__(self, color):\r\n super().__init__()\r\n self.radius = random.randrange(3, 15)\r\n self.dx = random.randrange(1, 5)\r\n self.dy = random.randrange(1, 5)\r\n self.x = random.randrange(0, WIDTH - 2 * self.radius)\r\n self.y = random.randrange(0, HEIGHT - 2 * self.radius)\r\n \r\n self.image = pygame.Surface((self.radius * 2, self.radius * 2));\r\n self.image.fill(WHITE)\r\n self.image.set_colorkey(WHITE)\r\n pygame.draw.circle(self.image, color, [self.radius , self.radius], self.radius);\r\n \r\n self.rect = pygame.Rect(self.x, self.y, self.radius * 2, self.radius * 2)\r\n \r\n # Updates the ball's position \r\n def 
update(self):\r\n if self.x >= WIDTH - 2 * self.radius or self.x <= 0:\r\n self.dx *= -1\r\n \r\n if self.y >= HEIGHT - 2 * self.radius or self.y <= 0:\r\n self.dy *= -1\r\n \r\n self.x += self.dx\r\n self.y += self.dy\r\n \r\n self.rect = pygame.Rect(self.x, self.y, self.radius * 2, self.radius * 2)\r\n\r\nclass Weapon(Circle):\r\n def __init__(self):\r\n super().__init__(ORANGE)\r\n self.dx = random.randrange(0, 2)\r\n self.dy = random.randrange(0, 2)\r\n \r\n self.rect = pygame.Rect(self.x, self.y, self.radius * 2, self.radius * 2)\r\n \r\n# Bounces circles off each other \r\ndef circleCollide(a, b):\r\n relx, rely = (b.x + b.radius - b.dx) - (a.x + a.radius - a.dx) , (b.y + a.radius - b.dy) - (a.y + b.radius - b.dy)\r\n relLen = math.sqrt(relx ** 2 + rely ** 2)\r\n if relLen == 0:\r\n return\r\n relx, rely = relx / relLen , rely / relLen;\r\n ah , av = a.dy * relx - a.dx * rely , a.dx * relx + a.dy * rely\r\n bh , bv = b.dy * relx - b.dx * rely , b.dx * relx + b.dy * rely\r\n ratio = a.radius / b.radius\r\n av2 = (2 * bv + av * (ratio - 1)) / (ratio + 1)\r\n bv2 = (2 * ratio * av + bv * (1 - ratio)) / (ratio + 1)\r\n a.dx, a.dy = av2 * relx - ah * rely , av2 * rely + ah * relx\r\n b.dx, b.dy = bv2 * relx - bh * rely , bv2 * rely + bh * relx\r\n\r\n# Constructs the object the Hero is to collect\r\nclass Objs(pygame.sprite.Sprite): \r\n def __init__(self):\r\n super().__init__()\r\n self.image = pygame.Surface((10, 10))\r\n self.image.fill(WHITE)\r\n self.image.set_colorkey(WHITE)\r\n pygame.draw.rect(self.image, GREEN, [0, 0, 5, 5])\r\n self.rect = self.image.get_rect()\r\n x = random.randrange(100, WIDTH - 5)\r\n y = random.randrange(40, HEIGHT - 5)\r\n self.rect = self.rect.move(x, y)\r\n \r\n # Draws the object \r\n def draw(self, surface):\r\n pygame.draw.rect(surface, RED, [self.rect.x, self.rect.y, 5, 5])\r\n \r\n # Moves the object after the Hero \"eats\" it\r\n def eaten(self):\r\n self.rect.x = random.randrange(0, WIDTH - 5)\r\n self.rect.y = random.randrange(0, HEIGHT - 5)\r\n \r\n# Constructs the hero \r\nclass Hero(pygame.sprite.Sprite):\r\n def __init__(self, x, y):\r\n super().__init__()\r\n self.x = x;\r\n self.y = y;\r\n self.rect = pygame.Rect(x, y, 20, 30)\r\n \r\n pos = [(0, 30), (20, 30), (10, 0)];\r\n self.imageUpNormal = pygame.Surface((20, 30))\r\n self.imageUpNormal.fill(WHITE)\r\n self.imageUpNormal.set_colorkey(WHITE)\r\n pygame.draw.polygon(self.imageUpNormal, BLUE, pos)\r\n\n self.imageUpHitten = pygame.Surface((20, 30))\r\n self.imageUpHitten.fill(WHITE)\r\n self.imageUpHitten.set_colorkey(WHITE)\r\n pygame.draw.polygon(self.imageUpHitten, PURPLE, pos)\r\n \r\n self.imageUp = self.imageUpNormal\r\n self.image = self.imageUp\r\n \r\n self.key = 0b0000\r\n self.hitTime = 0\r\n \r\n # Changes Hero color if hit by ball\r\n def hitten(self):\r\n self.hitTime = 10\r\n self.imageUp = self.imageUpHitten\r\n\r\n # Updates the Hero's position \n def update(self):\r\n if self.hitTime > 0:\r\n self.hitTime -= 1\r\n if self.hitTime == 0:\r\n self.imageUp = self.imageUpNormal\r\n \r\n if self.key == 0b1000:\r\n self.y -= 2\r\n self.rect = pygame.Rect(self.x, self.y, 20, 30);\r\n self.image = self.imageUp.copy()\r\n elif self.key == 0b0100:\r\n self.x += 2\r\n self.rect = pygame.Rect(self.x, self.y, 30, 30);\r\n self.image = pygame.transform.rotate(self.imageUp, -90)\r\n elif self.key == 0b0010:\r\n self.y += 2\r\n self.rect = pygame.Rect(self.x, self.y, 20, 30);\r\n self.image = pygame.transform.rotate(self.imageUp, 180)\r\n elif self.key == 0b0001:\r\n self.x -= 2\r\n 
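# facing left: pygame.transform.rotate turns counter-clockwise for positive angles\r\n            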
\r\n# Constructs the object the Hero is to collect\r\nclass Objs(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.image = pygame.Surface((10, 10))\r\n        self.image.fill(WHITE)\r\n        self.image.set_colorkey(WHITE)\r\n        pygame.draw.rect(self.image, GREEN, [0, 0, 5, 5])\r\n        self.rect = self.image.get_rect()\r\n        x = random.randrange(100, WIDTH - 5)\r\n        y = random.randrange(40, HEIGHT - 5)\r\n        self.rect = self.rect.move(x, y)\r\n\r\n    # Draws the object (unused; the sprite groups blit self.image instead)\r\n    def draw(self, surface):\r\n        pygame.draw.rect(surface, RED, [self.rect.x, self.rect.y, 5, 5])\r\n\r\n    # Moves the object after the Hero \"eats\" it\r\n    def eaten(self):\r\n        self.rect.x = random.randrange(0, WIDTH - 5)\r\n        self.rect.y = random.randrange(0, HEIGHT - 5)\r\n\r\n# Constructs the hero\r\nclass Hero(pygame.sprite.Sprite):\r\n    def __init__(self, x, y):\r\n        super().__init__()\r\n        self.x = x\r\n        self.y = y\r\n        self.rect = pygame.Rect(x, y, 20, 30)\r\n\r\n        pos = [(0, 30), (20, 30), (10, 0)]\r\n        self.imageUpNormal = pygame.Surface((20, 30))\r\n        self.imageUpNormal.fill(WHITE)\r\n        self.imageUpNormal.set_colorkey(WHITE)\r\n        pygame.draw.polygon(self.imageUpNormal, BLUE, pos)\r\n\r\n        self.imageUpHitten = pygame.Surface((20, 30))\r\n        self.imageUpHitten.fill(WHITE)\r\n        self.imageUpHitten.set_colorkey(WHITE)\r\n        pygame.draw.polygon(self.imageUpHitten, PURPLE, pos)\r\n\r\n        self.imageUp = self.imageUpNormal\r\n        self.image = self.imageUp\r\n\r\n        self.key = 0b0000\r\n        self.hitTime = 0\r\n\r\n    # Changes Hero color if hit by ball\r\n    def hitten(self):\r\n        self.hitTime = 10\r\n        self.imageUp = self.imageUpHitten\r\n\r\n    # Updates the Hero's position\r\n    def update(self):\r\n        if self.hitTime > 0:\r\n            self.hitTime -= 1\r\n            if self.hitTime == 0:\r\n                self.imageUp = self.imageUpNormal\r\n\r\n        if self.key == 0b1000:\r\n            self.y -= 2\r\n            self.rect = pygame.Rect(self.x, self.y, 20, 30)\r\n            self.image = self.imageUp.copy()\r\n        elif self.key == 0b0100:\r\n            self.x += 2\r\n            self.rect = pygame.Rect(self.x, self.y, 30, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, -90)\r\n        elif self.key == 0b0010:\r\n            self.y += 2\r\n            self.rect = pygame.Rect(self.x, self.y, 20, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, 180)\r\n        elif self.key == 0b0001:\r\n            self.x -= 2\r\n            self.rect = pygame.Rect(self.x, self.y, 20, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, 90)\r\n        elif self.key == 0b1100:\r\n            self.x += 1.414\r\n            self.y -= 1.414\r\n            self.rect = pygame.Rect(self.x, self.y, 30, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, -45)\r\n        elif self.key == 0b0110:\r\n            self.x += 1.414\r\n            self.y += 1.414\r\n            self.rect = pygame.Rect(self.x, self.y, 30, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, -135)\r\n        elif self.key == 0b0011:\r\n            self.x -= 1.414\r\n            self.y += 1.414\r\n            self.rect = pygame.Rect(self.x, self.y, 30, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, 135)\r\n        elif self.key == 0b1001:\r\n            self.x -= 1.414\r\n            self.y -= 1.414\r\n            self.rect = pygame.Rect(self.x, self.y, 30, 30)\r\n            self.image = pygame.transform.rotate(self.imageUp, 45)\r\n\r\n        if self.y + self.rect.h > HEIGHT:\r\n            self.y = HEIGHT - self.rect.h\r\n        elif self.y < 0:\r\n            self.y = 0\r\n        if self.x + self.rect.w > WIDTH:\r\n            self.x = WIDTH - self.rect.w\r\n        elif self.x < 0:\r\n            self.x = 0\r\n\r\n    # Setting a bit-key to the arrow keys when a key is pressed\r\n    def keyDown(self, key):\r\n        if key == K_UP:\r\n            self.key |= 0b1000\r\n        elif key == K_RIGHT:\r\n            self.key |= 0b0100\r\n        elif key == K_DOWN:\r\n            self.key |= 0b0010\r\n        elif key == K_LEFT:\r\n            self.key |= 0b0001\r\n\r\n    # Setting a bit-key to the arrow keys when a key is released\r\n    def keyUp(self, key):\r\n        if key == K_UP:\r\n            self.key &= 0b0111\r\n        elif key == K_RIGHT:\r\n            self.key &= 0b1011\r\n        elif key == K_DOWN:\r\n            self.key &= 0b1101\r\n        elif key == K_LEFT:\r\n            self.key &= 0b1110\r\n\r\n# Displays the score and health a player has\r\nclass Score:\r\n    def __init__(self):\r\n        self.init()\r\n\r\n    def init(self):\r\n        self.points = 0\r\n        self.lives = 100\r\n\r\n    # Draws the score and health\r\n    def draw(self, surface):\r\n        font = pygame.font.SysFont('Calibri', 20, True, False)\r\n        text1 = font.render(\"Score: \" + str(self.points), True, WHITE)\r\n        # health bar\r\n        pygame.draw.rect(surface, (150, 0, 0), (0, 20, 100, 20))\r\n        pygame.draw.rect(surface, (0, 150, 0), (0, 20, self.lives, 20))\r\n        if self.lives > 0:\r\n            text2 = font.render(\"Health: \" + str(self.lives), True, WHITE)\r\n        else:\r\n            text2 = font.render(\"Game Over!\", True, RED)\r\n        surface.blit(text1, [0, 0])\r\n        surface.blit(text2, [0, 20])\r\n\r\n# Initializes hero, obj, score\r\nhero = Hero(WIDTH / 2, HEIGHT / 2)\r\nobj = Objs()\r\nscore = Score()\r\nweapon = Weapon()\r\n\r\n\r\n# Restarts the game\r\ndef restart():\r\n    global all_items\r\n    global weapon\r\n    score.init()\r\n    circ1 = Circle(RED)\r\n    circ2 = Circle(RED)\r\n    circLs.add(circ1, circ2)\r\n    print(\"restart\")\r\n    weapon = Weapon()\r\n    all_items.empty()\r\n    all_items.add(circ1, circ2, hero, obj)\r\n    all_items.add(weapon)\r\n\r\ndef main():\r\n    global weapon  # reassigned below when the weapon detonates\r\n# -------- Main Program Loop -----------\r\n    while True:\r\n        # --- Main event loop\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                exit()\r\n            elif event.type == pygame.KEYDOWN:\r\n                if event.key == K_DOWN or event.key == K_UP or event.key == K_RIGHT or event.key == K_LEFT:\r\n                    hero.keyDown(event.key)\r\n                elif event.key == K_q:\r\n                    exit()\r\n                elif event.key == K_RETURN:\r\n                    if score.lives == 0:\r\n                        restart()\r\n            elif event.type == pygame.KEYUP:\r\n                hero.keyUp(event.key)\r\n\r\n        screen.fill(BLACK)\r\n\r\n        # If ball collides with Hero\r\n        if pygame.sprite.spritecollide(hero, circLs, False):\r\n            print(\"Hero was hit!\")\r\n            score.lives -= 1\r\n            hero.hitten()\r\n            damage.play(1, 500)\r\n\r\n        # If Hero
\"eats\" object\r\n if hero.rect.colliderect(obj.rect):\r\n obj.eaten()\r\n score.points += 1\r\n newCircle = Circle(RED)\r\n circLs.add(newCircle)\r\n all_items.add(newCircle)\r\n song.play(1, 200)\r\n \r\n # If Hero collects weapon \r\n if hero.rect.colliderect(weapon.rect):\r\n weapon_range = 10 * weapon.radius\r\n for c in circLs:\r\n if distance((c.x + c.radius, c.y + c.radius), (weapon.x + weapon.radius, weapon.y + weapon.radius)) < weapon_range:\r\n all_items.remove(c)\r\n circLs.remove(c)\r\n print(\"removing\")\r\n pygame.draw.circle(screen, ORANGE, (weapon.x, weapon.y), weapon_range) \r\n weapon.kill()\r\n global weapon\r\n weapon = Weapon()\r\n all_items.add(weapon)\r\n weapon_sound.play(1, 500)\r\n \r\n \r\n hero.update()\r\n # Draw objects on screen\r\n \r\n ls = circLs.sprites()\r\n for i in range(len(ls)):\r\n for j in range(i + 1, len(ls)):\r\n if distance(ls[i].rect, ls[j].rect) < ls[i].radius + ls[j].radius:\r\n circleCollide(ls[i], ls[j])\r\n \r\n circLs.update()\r\n weapon.update()\r\n hero.update()\r\n score.draw(screen)\r\n all_items.draw(screen)\r\n \r\n # --- Go ahead and update the screen with what we've drawn.\r\n pygame.display.flip()\r\n \r\n # --- Limit to 60 frames per second\r\n if score.lives <= 0:\r\n score.lives = 0\r\n all_items.empty() \r\n circLs.empty()\r\n game_over = pygame.sprite.Sprite()\r\n message = pygame.Surface((400, 200))\r\n \r\n # Game Over\r\n font = pygame.font.SysFont('Calibri', 20, True, False)\r\n text1 = font.render(\"Game Over, Press Enter to start new game\", True, WHITE)\r\n text2 = font.render(\"Q to quit\", True, WHITE) \r\n message.blit(text1, [0, 0])\r\n message.blit(text2, [0, 20])\r\n \r\n game_over.image = message\r\n \r\n game_over.rect = message.get_rect()\r\n game_over.rect.move_ip(200, 200)\r\n all_items.add(game_over)\r\n \r\n clock.tick(60)\r\n \r\n# Close the window and quit.\r\n pygame.quit()\r\n\r\nif __name__ == \"__main__\":\r\n restart()\r\n main()\r\n","repo_name":"fredzqm/ballGame","sub_path":"mainPackage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38891603357","text":"for i in range(1, 5):\r\n print(i)\r\n# in {preferences|edit|general|cod...|python} chang no. 
\r\nif __name__ == \"__main__\":\r\n    restart()\r\n    main()\r\n","repo_name":"fredzqm/ballGame","sub_path":"mainPackage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38891603357","text":"for i in range(1, 5):\r\n    print(i)\r\n# in {preferences|edit|general|cod...|python} change no. space\r\n\r\nname = input(\"please enter your name.\")\r\nage = int(input(\"how old are you, {0} ?\".format(name)))\r\n\r\nif age >= 18:\r\n    print(\"you are old enough to vote\")\r\n    print(\"please put an X in the box\")\r\nelse:\r\n    print(\"please come back in {0} years\".format(18 - age))\r\n\r\ninput()","repo_name":"supernova-Z313/python_myfirst_code","sub_path":"2tab/TAB.py","file_name":"TAB.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"27240643250","text":"import numpy as np\nfrom matplotlib.ticker import MultipleLocator\nfrom numpy.ma import cos\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport time\nfrom mpl_toolkits.mplot3d import Axes3D\nimport datetime\nfrom scipy.interpolate import make_interp_spline\n\nDNA_SIZE = 12 \t\t# encoding length (bits per variable)\nPOP_SIZE = 100 \t# population size\nCROSS_RATE = 0.8 \t# crossover rate\nMUTA_RATE = 0.15 \t# mutation rate\nIterations = 100  # number of generations\nX_BOUND = [0,10]\t# x interval\nY_BOUND = [0,10]\t# y interval\n\n\ndef F(x, y):  # the objective function to maximize\n\treturn (6.452*(x+0.125*y)*(cos(x)-cos(2*y))**2)/(0.8+(x-4.2)**2+2*(y-7)**2)+3.226*y\n\ndef decodeDNA(pop):  # decode genes into (x, y)\n\tx_pop = pop[:,1::2]\t\t# odd columns encode X\n\ty_pop = pop[:,::2] \t\t# even columns encode Y\n\tx = x_pop.dot(2**np.arange(DNA_SIZE)[::-1])/float(2**DNA_SIZE-1)*(X_BOUND[1]-X_BOUND[0])+X_BOUND[0]  # binary to decimal, normalized into X_BOUND\n\ty = y_pop.dot(2**np.arange(DNA_SIZE)[::-1])/float(2**DNA_SIZE-1)*(Y_BOUND[1]-Y_BOUND[0])+Y_BOUND[0]  # binary to decimal, normalized into Y_BOUND\n\treturn x,y\n
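\n# Added illustration (editorial, not called by the GA): a self-contained check\n# of the decoding rule above -- an all-ones gene decodes exactly to the upper\n# bound of X_BOUND, an all-zeros gene to the lower bound.\ndef _decode_demo():\n\tbits = np.ones(DNA_SIZE, dtype=int)\n\tval = bits.dot(2**np.arange(DNA_SIZE)[::-1]) / float(2**DNA_SIZE - 1)\n\treturn val * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]  # == X_BOUND[1]\n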
\ndef getfitness(pop):  # compute fitness for every individual\n\tx,y = decodeDNA(pop)\n\ttemp = F(x, y)\n\treturn (temp - np.min(temp)) + 0.0001  # subtract the minimum so fitness never goes negative\n\ndef select(pop, fitness):\t# fitness-proportional selection (Monte Carlo / roulette wheel)\n\ttemp = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True, p=(fitness)/(fitness.sum()))\n\treturn pop[temp]\n\ndef crossmuta(pop, CROSS_RATE):  # crossover and mutation over the whole population\n\tnew_pop = []\n\tfor i in pop:\t\t# treat each individual in turn as the father\n\t\ttemp = i\t\t# the child starts with all of the father's genes\n\t\tif np.random.rand() < CROSS_RATE:\t\t\t\t\t\t# cross over with probability CROSS_RATE\n\t\t\tj = pop[np.random.randint(POP_SIZE)]\t\t\t\t# pick another random individual as the mother\n\t\t\tcpoints1 = np.random.randint(0, DNA_SIZE*2-1) \t# pick two crossover points [cpoints1, cpoints2]\n\t\t\tcpoints2 = np.random.randint(cpoints1,DNA_SIZE*2)\n\t\t\ttemp[cpoints1:cpoints2] = j[cpoints1:cpoints2] \t# the child takes the mother's genes between the points\n\t\tmutation(temp,MUTA_RATE)\t\t\t\t\t\t\t\t# each child may mutate with probability MUTA_RATE\n\t\tnew_pop.append(temp)\n\treturn new_pop\n\ndef mutation(temp, MUTA_RATE):\n\tif np.random.rand() < MUTA_RATE: \t\t\t\t\t\t# mutate with probability MUTA_RATE\n\t\tmutate_point = np.random.randint(0, DNA_SIZE)\t\t# pick a random gene position to mutate\n\t\ttemp[mutate_point] = temp[mutate_point] ^ 1 \t\t# flip the bit at the mutation point\n\ndef print_info(pop):  # report the best result\n\tfitness = getfitness(pop)\n\tmaxfitness = np.argmax(fitness)  # index of the fittest individual\n\tprint(\"Iterations: \", Iterations)\n\tprint(\"Best fitness: \", fitness[maxfitness])\n\tx,y = decodeDNA(pop)\n\tprint(\"Best genotype: \", pop[maxfitness])\n\tprint(\"Best solution (x, y) = \", (x[maxfitness], y[maxfitness]))\n\tprint(\"Best value F(x, y) = \", F(x[maxfitness],y[maxfitness]))\n\n# plot the objective surface\ndef plot_3d(ax):\n\tX = np.linspace(*X_BOUND, 100)\n\tY = np.linspace(*Y_BOUND, 100)\n\tX, Y = np.meshgrid(X, Y)\n\tZ = F(X, Y)\n\tax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm)\n\tax.set_zlim(-20, 100)\n\tax.set_xlabel('x')\n\tax.set_ylabel('y')\n\tax.set_zlabel('z')\n\tplt.pause(3)\n\tplt.show()\n\n# plot run time, best value and best fitness against the swept parameter\ndef draw(l1, l2, l3, l4, testStr):\n\tax1 = plt.subplot(131)\n\tax1.plot(l1, l2, 'b')\n\tax1.set_xlabel(testStr)\n\tax1.set_ylabel(\"COST_TIME\")\n\tax1.set_ylim(bottom=0)\n\tax2 = plt.subplot(132)\n\tax2.plot(l1, l4, 'r')\n\tax2.set_xlabel(testStr)\n\tax2.set_ylabel(\"BEST_F(X,Y)\")\n\tax2.set_ylim(bottom=0)\n\tax3 = plt.subplot(133)\n\tax3.plot(l1, l3, 'g')\n\tax3.set_xlabel(testStr)\n\tax3.set_ylabel(\"BEST_FITNESS\")\n\tax3.set_ylim(bottom=0)\n\tplt.show()\n\n# Study how varying a single parameter affects solution quality and run time\n\n# DNA length swept over [6, 30]; each setting repeated 10 times to reduce random error\ndef DNA_SIZE_TEST():\n\n\tdna_size_list = range(6,30,2)\n\tcost_time = []\n\tbest_fitness = []\n\tbest_f = []\n\tk = 10  # repetitions, to reduce random error\n\n\tfor i in dna_size_list:\n\t\ttotal_time = 0\n\t\ttotal_fitness = 0\n\t\ttotal_f = 0\n\t\tfor j in range(k):\n\t\t\tglobal DNA_SIZE\n\t\t\tDNA_SIZE = i\n\t\t\tstart_t = time.time()\n\n\t\t\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))  # pop: POP_SIZE x (DNA_SIZE * 2) random 0/1 values\n\t\t\tfor _ in range(Iterations):  # iterate over the generations\n\t\t\t\tx, y = decodeDNA(pop)\n\t\t\t\tpop = np.array(crossmuta(pop, CROSS_RATE))  # crossover and mutation\n\t\t\t\tfitness = getfitness(pop)  # fitness of every individual\n\t\t\t\tpop = select(pop, fitness)  # select the next generation\n\n\t\t\tend_t = time.time()\n\t\t\tfitness = getfitness(pop)\n\t\t\tmaxfitness = np.argmax(fitness)\n\t\t\tx, y = decodeDNA(pop)\n\n\t\t\ttotal_time += (end_t - start_t)\n\t\t\ttotal_fitness += fitness[maxfitness]\n\t\t\ttotal_f += F(x[maxfitness], y[maxfitness])\n\n\t\tcost_time.append(total_time / k)\n\t\tbest_fitness.append(total_fitness / k)\n\t\tbest_f.append(total_f / k)\n\n\tdraw(dna_size_list, cost_time, best_fitness, best_f, \"DNA_SIZE\")\n\n# Population size swept over [20, 800]; each setting repeated 3 times to reduce random error\ndef POP_SIZE_TEST():\n\n\tpop_size_list = range(20,800,20)\n\tcost_time = []\n\tbest_fitness = []\n\tbest_f = []\n\tk = 3\n\n\tfor i in pop_size_list:\n\t\ttotal_time = 0\n\t\ttotal_fitness = 0\n\t\ttotal_f = 0\n\t\tfor j in range(k):\n\t\t\tglobal POP_SIZE\n\t\t\tPOP_SIZE = i\n\t\t\tstart_t = time.time()  # start timing\n\n\t\t\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\t\t\tfor _ in range(Iterations):\n\t\t\t\tx, y = decodeDNA(pop)\n\t\t\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\n\t\t\t\tfitness = getfitness(pop)\n\t\t\t\tpop = select(pop, fitness)\n\n\t\t\tend_t = time.time()\n\t\t\tfitness = getfitness(pop)\n\t\t\tmaxfitness = np.argmax(fitness)\n\t\t\tx, y = decodeDNA(pop)\n\n\t\t\ttotal_time += (end_t - start_t)\n\t\t\ttotal_fitness += fitness[maxfitness]\n\t\t\ttotal_f += F(x[maxfitness], y[maxfitness])\n\n\t\tcost_time.append(total_time / k)\n\t\tbest_fitness.append(total_fitness / k)\n\t\tbest_f.append(total_f / k)\n\n\tdraw(pop_size_list, cost_time, best_fitness, best_f, \"POP_SIZE\")\n
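\n# Added sketch (editorial; `_run_ga_once` is a hypothetical helper, not part of\n# the original script): every *_TEST function repeats the same run-and-measure\n# scaffold, which could be factored out like this:\ndef _run_ga_once():\n\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\tfor _ in range(Iterations):\n\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\n\t\tpop = select(pop, getfitness(pop))\n\tfitness = getfitness(pop)\n\tbest = np.argmax(fitness)\n\tx, y = decodeDNA(pop)\n\treturn fitness[best], F(x[best], y[best])\n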
\n# Crossover rate swept over [0, 1]; each setting repeated 10 times to reduce random error\ndef CROSS_RATE_TEST():\n\n\tr_list = range(0,21)\n\tcr_list = []\n\tfor i in r_list:\n\t\tcr_list.append(i * 0.05)\n\n\tcost_time = []\n\tbest_fitness = []\n\tbest_f = []\n\tk = 10\n\n\tfor i in r_list:\n\t\ttotal_time = 0\n\t\ttotal_fitness = 0\n\t\ttotal_f = 0\n\t\tfor j in range(k):\n\t\t\tglobal CROSS_RATE\n\t\t\tCROSS_RATE = cr_list[i]\n\t\t\tstart_t = time.time()  # start timing\n\t\t\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\t\t\tfor _ in range(Iterations):\n\t\t\t\tx, y = decodeDNA(pop)\n\t\t\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\n\t\t\t\tfitness = getfitness(pop)\n\t\t\t\tpop = select(pop, fitness)  # select the next generation\n\n\t\t\tend_t = time.time()\n\t\t\tfitness = getfitness(pop)\n\t\t\tmaxfitness = np.argmax(fitness)\n\t\t\tx, y = decodeDNA(pop)\n\n\t\t\ttotal_time += (end_t - start_t)\n\t\t\ttotal_fitness += fitness[maxfitness]\n\t\t\ttotal_f += F(x[maxfitness], y[maxfitness])\n\n\t\tcost_time.append(total_time / k)\n\t\tbest_fitness.append(total_fitness / k)\n\t\tbest_f.append(total_f / k)\n\n\tdraw(cr_list, cost_time, best_fitness, best_f, \"CROSS_RATE\")\n\n# Mutation rate swept over [0, 1]; each setting repeated 10 times to reduce random error\ndef MUTA_RATE_TEST():\n\tr_list = range(0, 21)\n\tmr_list = []\n\tfor i in r_list:\n\t\tmr_list.append(i * 0.05)\n\n\tcost_time = []\n\tbest_fitness = []\n\tbest_f = []\n\tk = 10\n\n\tfor i in r_list:\n\t\ttotal_time = 0\n\t\ttotal_fitness = 0\n\t\ttotal_f = 0\n\t\tfor j in range(k):\n\t\t\tglobal MUTA_RATE\n\t\t\tMUTA_RATE = mr_list[i]\n\t\t\tstart_t = time.time()  # start timing\n\t\t\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\t\t\tfor _ in range(Iterations):\n\t\t\t\tx, y = decodeDNA(pop)\n\t\t\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\n\t\t\t\tfitness = getfitness(pop)\n\t\t\t\tpop = select(pop, fitness)\n\n\t\t\tend_t = time.time()\n\t\t\tfitness = getfitness(pop)\n\t\t\tmaxfitness = np.argmax(fitness)\n\t\t\tx, y = decodeDNA(pop)\n\n\t\t\ttotal_time += (end_t - start_t)\n\t\t\ttotal_fitness += fitness[maxfitness]\n\t\t\ttotal_f += F(x[maxfitness], y[maxfitness])\n\n\t\tcost_time.append(total_time / k)\n\t\tbest_fitness.append(total_fitness / k)\n\t\tbest_f.append(total_f / k)\n\n\tdraw(mr_list, cost_time, best_fitness, best_f, \"MUTA_RATE\")\n\n# Iteration count swept over [1, 1000]; each setting repeated 10 times to reduce random error\ndef ITERATION_TEST():\n\ti_list = range(1, 1010, 50)\n\tcost_time = []\n\tbest_fitness = []\n\tbest_f = []\n\tk = 10  # repetitions, to reduce random error\n\n\tfor i in i_list:\n\t\ttotal_time = 0\n\t\ttotal_fitness = 0\n\t\ttotal_f = 0\n\t\tfor j in range(k):\n\t\t\tglobal Iterations\n\t\t\tIterations = i\n\t\t\tstart_t = time.time()\n\t\t\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\t\t\tfor _ in range(Iterations):\n\t\t\t\tx, y = decodeDNA(pop)\n\t\t\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\n\t\t\t\tfitness = getfitness(pop)\n\t\t\t\tpop = select(pop, fitness)\n\n\t\t\tend_t = time.time()\n\t\t\tfitness = getfitness(pop)\n\t\t\tmaxfitness = np.argmax(fitness)\n\t\t\tx, y = decodeDNA(pop)\n\n\t\t\ttotal_time += (end_t - start_t)\n\t\t\ttotal_fitness += fitness[maxfitness]\n\t\t\ttotal_f += F(x[maxfitness], y[maxfitness])\n\n\t\tcost_time.append(total_time / k)\n\t\tbest_fitness.append(total_fitness / k)\n\t\tbest_f.append(total_f / k)\n\n\tdraw(i_list, cost_time, best_fitness, best_f, \"ITERATIONS\")\n\n# Baseline (non-optimized) GA run\ndef NonOpt():\n\tstart_t = datetime.datetime.now()\n\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\tfor _ in range(Iterations):\n\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\t\t# crossover and mutation\n\t\tfitness = getfitness(pop)\t\t\t\t\t\t# fitness of every individual\n\t\tpop = select(pop, fitness) \t\t\t\t\t# select the next generation\n\tend_t = datetime.datetime.now()\n\tprint(\"Baseline\\nElapsed: \",(end_t - start_t))\n\tprint_info(pop)\n\tfitness = getfitness(pop)\n\tmaxfitness = np.argmax(fitness)\n\tx, y = decodeDNA(pop)\n\treturn F(x[maxfitness],y[maxfitness])\n
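\n# Editorial note (added): Opt_1 below differs from NonOpt only by elitism --\n# after selection, the fittest individual of the previous generation is copied\n# back into slot 0 (pop[0] = best), biasing each new generation toward the\n# best solution found in the one before it.\n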
\n# GA with best-individual (elitist) preservation\ndef Opt_1():\n\tstart_t = datetime.datetime.now()\n\tpop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\tfor _ in range(Iterations):\n\t\tpop = np.array(crossmuta(pop, CROSS_RATE))\t\t# crossover and mutation\n\t\tfitness = getfitness(pop)\t\t\t\t\t\t# fitness of every individual\n\t\tbest = pop[np.argmax(fitness)]\n\t\tpop = select(pop, fitness) \t\t\t\t\t# select the next generation\n\t\tpop[0] = best\n\tend_t = datetime.datetime.now()\n\tprint(\"\\nElitist preservation\\nElapsed: \",(end_t - start_t))\n\tprint_info(pop)\n\tfitness = getfitness(pop)\n\tmaxfitness = np.argmax(fitness)\n\tx, y = decodeDNA(pop)\n\treturn F(x[maxfitness],y[maxfitness])\n\n# Compare elitist preservation against the baseline\ndef OPT1_TEST():\n\ti_list = range(100)\n\tf = []\n\tf_opt = []\n\tfor i in i_list:\n\t\tprint(i)\n\t\tf.append(NonOpt())\n\t\tf_opt.append(Opt_1())\n\n\tf.sort()\n\tf_opt.sort()\n\n\tplt.plot(i_list, f, marker='o', label=\"Non Optimized\")\n\tplt.plot(i_list, f_opt, marker='^', label=\"Best Preserve\")\n\tplt.gca().xaxis.set_major_locator(MultipleLocator(10))\n\tplt.legend()\n\tplt.show()\n\nif __name__ == \"__main__\":\n\n\tOPT1_TEST()\n\n\t# DNA_SIZE_TEST()\n\t# POP_SIZE_TEST()\n\t# CROSS_RATE_TEST()\n\t# MUTA_RATE_TEST()\n\t# ITERATION_TEST()\n\n\t# fig = plt.figure()\n\t# ax = Axes3D(fig)\n\t# plt.ion()\n\t# plot_3d(ax)\n\t#\n\t# start_t = datetime.datetime.now()\n\t# pop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE * 2))\n\t# for _ in range(Iterations):\n\t# \tx, y = decodeDNA(pop)\n\t#\n\t# \t# update the scatter plot\n\t# \tif 'sca' in locals():\n\t# \t\tsca.remove()\n\t# \tsca = ax.scatter(x, y, F(x, y), c='black', marker='o')\n\t# \tplt.show()\n\t# \tplt.pause(0.1)\n\t#\n\t# \tpop = np.array(crossmuta(pop, CROSS_RATE))\n\t# \tfitness = getfitness(pop)\n\t# \tpop = select(pop, fitness)\n\t#\n\t# end_t = datetime.datetime.now()\n\t# print(\"Elapsed: \",(end_t - start_t))\n\t# print_info(pop)\n\t# plt.ioff()\n\t# plot_3d(ax)\n\t","repo_name":"WondrousWisdomcard/SYSU-JuniorExperience","sub_path":"人工智能/作业/3 - 遗传算法求最值/ga_max.py","file_name":"ga_max.py","file_ext":"py","file_size_in_byte":12993,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"}
{"seq_id":"14328008323","text":"import os\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\n\nfrom sklearn.model_selection import train_test_split\n\nfrom data.lfnlf import *\n\n\n# In general, for a sequence dataset, the goal is to learn the mapping from\n# (N, T, I) to (N, T, O)\n\n# LF, NLF\ntask = \"LF\"\nseq_len = 64\nN = 100000\nmemory_type = \"exp\"\n\ninput_dim = 1\noutput_dim = 1\nrho = np.zeros((seq_len, input_dim, output_dim))\nfor i in range(seq_len):\n    rho[i, :, :] = np.exp(-i)\n\nsave_dir = Path(f\"./data/{task}_{N}/{memory_type}\")\nsave_dir.mkdir(exist_ok=True, parents=True)\nx, y = lfnlf(N, seq_len, input_dim, output_dim, rho, power=1.0 if task == \"LF\" else 2.0)\n\n# train/validation split; train_test_split returns the two x splits first,\n# then the two y splits\nx_train, x_valid, y_train, y_valid = train_test_split(\n    x, y, test_size=0.2, random_state=2023\n)\n\nprint(x_train.shape, y_train.shape, x_valid.shape, y_valid.shape)\ntorch.save(torch.Tensor(x_train), save_dir / \"x_train.pt\")\ntorch.save(torch.Tensor(y_train), save_dir / \"y_train.pt\")\ntorch.save(torch.Tensor(x_valid), save_dir / \"x_valid.pt\")\ntorch.save(torch.Tensor(y_valid), save_dir /
\"y_valid.pt\")\n","repo_name":"radarFudan/benchmark_sequence_modeling","sub_path":"prepare_datasets.py","file_name":"prepare_datasets.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40571121085","text":"# -*- coding: utf-8 -*-\nfrom functools import wraps\nimport redis\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.http import require_POST\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom annoying.decorators import render_to\n\nfrom forms import RoomForm\nfrom models import Room\n\nfrom app import settings\n\ndef create_redis_connection():\n return redis.Redis(host=settings.REDIS_HOST, db=settings.REDIS_DB)\n\ndef room_edit(func):\n @wraps(func)\n def inner(*args, **kwargs):\n request = args[0]\n room = get_object_or_404(Room, pk=kwargs['id'])\n del kwargs['id']\n\n if room.owner != request.user:\n return HttpResponseForbidden()\n\n kwargs['room'] = room\n\n return func(*args, **kwargs)\n return inner\n\ndef clear_room(request):\n from django.db import connection, transaction\n\n cursor = connection.cursor()\n transaction.enter_transaction_management(True)\n try:\n cursor.execute(\"SET foreign_key_checks = 0\")\n cursor.execute(\"TRUNCATE games_contact\")\n cursor.execute(\"TRUNCATE games_game\")\n cursor.execute(\"TRUNCATE rooms_room_invited\")\n cursor.execute(\"TRUNCATE rooms_room\")\n cursor.execute(\"TRUNCATE auth_user\")\n cursor.execute(\"SET foreign_key_checks = 1\")\n\n cursor.execute(\n u\"\"\"INSERT INTO `auth_user` VALUES\n (1,'admin','','','admin@gmail.com','pbkdf2_sha256$10000$LMadMmTEWbI8$elibdQDsHjTFJLF6ATj4mku521Rq7ckLjiebc/U/BrY=',1,1,1,'2013-01-13 11:40:00','2012-11-22 05:43:09'),\n (2,'john','John','Lennon','john@gmail.com','pbkdf2_sha256$10000$6YCa9QKIzUAI$VIAB/ReG83UVDiWUdLxFu1h5iYXrZ10nFbyuJCNAQTE=',0,1,0,'2013-01-13 11:13:23','2012-11-22 09:09:27'),\n (3,'paul','Paul','McCartney','paul@gmail.com','pbkdf2_sha256$10000$0vpsba0BUF5g$pIAdXv7YYeAEvwUHCUiloz5gQh9s/WXwnPW5FQ95fXo=',0,1,0,'2013-01-13 11:12:12','2012-11-22 09:09:47'),\n (4,'yoko','Yoko','Ono','yoko@gmail.com','pbkdf2_sha256$10000$B4bmcBiB97HB$xCU+w2FhYNu9Pa+YDIYORRJ2m82/4R3xXkEI5Te5tWc=',0,1,0,'2013-01-13 11:11:29','2012-11-22 09:11:11')\n \"\"\"\n )\n\n cursor.execute(u\"INSERT INTO `rooms_room` VALUES (1,'Любители моделей', 2, 0, 0)\")\n cursor.execute(u\"INSERT INTO `rooms_room` VALUES (2,'Экзистенциальная Россия', 2, 0, 0)\")\n\n cursor.execute(u\"INSERT INTO `games_game` VALUES (1,2,'моделирование',2,1,'running')\")\n cursor.execute(u\"INSERT INTO `games_game` VALUES (2,2,'пустота',8,2,'complete')\")\n cursor.execute(u\"INSERT INTO `games_contact` VALUES (1,1,'2012-11-22 16:56:25',3,'мода','как сказала Коко Шанель, она выходит сама из себя',NULL,NULL,NULL,1,0),(2,1,'2012-11-22 17:09:04',4,'моделирование','Оно бывает имитационным, эволюционным, и изредка даже психологическим',NULL,NULL,NULL,1,0)\")\n except Exception:\n transaction.rollback()\n return HttpResponse('bad')\n transaction.commit()\n\n redis_connection = create_redis_connection()\n\n redis_connection.publish('web_channel', 'reload_games')\n\n return HttpResponse(\"cleared\")\n\ndef 
redis_room_key(room_id):\n return 'room:' + str(room_id)\n\n@render_to('room/main.html')\n@login_required\ndef room(request, room_id):\n\n room = get_object_or_404(Room, pk=room_id)\n\n user = request.user\n\n if not room.has_user_access(user):\n return HttpResponseRedirect(reverse('rooms.views.list'))\n\n redis_connection = create_redis_connection()\n\n redis_connection.hset(redis_room_key(room_id), request.session.session_key, user.id)\n redis_connection.set('session_key:' + str(user.id), request.session.session_key)\n\n return { 'room' : room }\n\ndef authorize_user_and_redirect_to_room(request, room_id, user_id):\n user = User.objects.get(pk=user_id)\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n\n return HttpResponseRedirect(reverse('rooms.views.room', args=[room_id]))\n\n@render_to('room/create.html')\n@login_required\ndef create(request):\n\n if request.method == 'POST':\n form = RoomForm(request.POST)\n\n if form.is_valid():\n room = form.save(commit=False)\n room.owner = request.user\n room.save()\n url = reverse('rooms.views.edit', args=[room.id])\n return HttpResponseRedirect(url)\n else:\n form = RoomForm()\n\n return { 'form' : form }\n\n@render_to('room/edit.html')\n@login_required\n@room_edit\ndef edit(request, room):\n was_private = room.is_private\n\n if request.method == 'POST':\n form = RoomForm(request.POST, instance=room)\n\n if form.is_valid():\n form.save(commit=False)\n room.save()\n\n if room.is_private and not was_private:\n redis = create_redis_connection()\n redis.publish('web_channel', 'room_private:' + str(room.id))\n try:\n redis.hdel(redis_room_key(room.id), *redis.hkeys(redis_room_key(room.id)))\n except:\n pass\n\n return HttpResponseRedirect(request.get_full_path())\n else:\n form = RoomForm(instance=room)\n\n response_data = { 'form' : form }\n\n if room.is_private:\n response_data['invited_users'] = room.invited.order_by('username').all()\n response_data['users'] = User.objects.exclude(pk=room.owner.id).exclude(rooms__pk=room.id).order_by('username').all()\n\n return response_data\n\n@csrf_protect\n@login_required\n@require_POST\n@room_edit\ndef delete(request, room):\n\n redis = create_redis_connection()\n redis.publish('web_channel', 'room_deleted:' + str(room.id))\n try:\n redis.hdel(redis_room_key(room.id), *redis.hkeys(redis_room_key(room.id)))\n except:\n pass\n\n room.delete()\n\n return HttpResponseRedirect(reverse('rooms.views.my_list'))\n\n@csrf_protect\n@login_required\n@require_POST\n@room_edit\ndef add_invite(request, room, user_id):\n if room.is_private:\n user = get_object_or_404(User, pk=user_id)\n\n room.invited.add(user)\n\n return HttpResponseRedirect(reverse('rooms.views.edit', args=[room.id]))\n\n@csrf_protect\n@login_required\n@require_POST\n@room_edit\ndef delete_invite(request, room, user_id):\n if room.is_private:\n user = get_object_or_404(User, pk=user_id)\n\n redis = create_redis_connection()\n redis.publish('web_channel', 'kick_user_from_room:' + str(room.id) + ':' + str(user.id))\n session_key = redis.get('session_key:' + str(user.id))\n try:\n redis.hdel(redis_room_key(room.id), session_key)\n except:\n pass\n\n room.invited.remove(user)\n\n return HttpResponseRedirect(reverse('rooms.views.edit', args=[room.id]))\n\n@render_to('room/list.html')\n@login_required\ndef list(request):\n rooms = Room.objects.filter(\n Q(is_private=False) | Q(owner__pk=request.user.id) | Q (invited__pk=request.user.id)\n ).distinct().order_by('-online_amount').all()\n return { 'rooms' : rooms 
}\n\n@render_to('room/my_list.html')\n@login_required\ndef my_list(request):\n rooms = Room.objects.filter(owner=request.user).all()\n return { 'rooms' : rooms }\n\n","repo_name":"dzharkov/kontakt-game","sub_path":"rooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24897475625","text":"import os.path\r\n\r\nimport ssl\r\n\r\nimport pandas as pd\r\n\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sbs\r\nimport sqlite3\r\n\r\nfrom joblib import dump, load\r\nfrom matplotlib.backends._backend_tk import NavigationToolbar2Tk\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\n\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\r\nfrom screeninfo import get_monitors\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV\r\n\r\n# panda data setup\r\n# =======================================================================================================================\r\ndataFile = 'pliktextowy.txt'\r\nmodelFile = 'model.joblib'\r\n\r\nssl._create_default_https_context = ssl._create_unverified_context\r\n\r\nurl = \"\"\r\nheaders = []\r\n\r\nfile1 = open(dataFile, 'r')\r\nlines = file1.readlines()\r\ncounter = 0\r\nfor line in lines:\r\n if counter == 0:\r\n url = line.rstrip()\r\n else:\r\n headers.append(line.rstrip())\r\n counter += 1\r\n\r\ntry:\r\n conn = sqlite3.connect('wines_dominik_nykiel')\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT * from DataTable\")\r\n\r\n pandaData = pd.DataFrame(cursor.fetchall(), columns=headers)\r\n\r\nexcept sqlite3.Error as e:\r\n print(\"No data in database, creating new\")\r\n pandaData = pd.read_csv(url, names=headers)\r\n\r\n cursor.execute(\r\n 'CREATE TABLE IF NOT EXISTS DataTable (TypeOf number ,Alcohol number, Malic_acid number, Ash number, Alcalinity_of_ash number,Magnesium number,Total_phenols number,Flavanoids number,Nonflavanoid_phenols number,Proanthocyanins number,Color_intensity number,Hue number,OD280_OD315_of_diluted_wines number, Proline number)')\r\n\r\n pandaData.to_sql('DataTable', conn, if_exists='replace', index=False)\r\n\r\nfinally:\r\n if cursor:\r\n cursor.close()\r\n if conn:\r\n conn.close()\r\n\r\nX = pandaData.iloc[:, 1:].values\r\ny = pandaData.iloc[:, 0].values\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=2023)\r\n\r\nif os.path.exists(modelFile):\r\n knn = load(modelFile)\r\nelse:\r\n knn = KNeighborsClassifier(n_neighbors=5)\r\n# end of panda data setup\r\n# =======================================================================================================================\r\n\r\n# window initialize\r\n# =======================================================================================================================\r\n\r\nroot = tk.Tk()\r\nroot.title(\"kNN classifier\")\r\n\r\nscreen_width = get_monitors()[0].width\r\nscreen_height = get_monitors()[0].height\r\nroot.geometry(f\"{int(screen_width / 2) + 350}x{int(screen_height / 2)}\")\r\n\r\nleft_frame = tk.Frame(root, borderwidth=4, relief=\"ridge\", width=int(screen_width / 8), height=int(screen_width / 4))\r\nleft_frame.pack(side=\"left\", padx=10, pady=10)\r\nleft_frame.pack_propagate(False)\r\n\r\ngrade_frame = 
tk.Frame(root, width=int(screen_width / 4), height=left_frame[\"height\"], borderwidth=4, relief=\"ridge\")\r\ngrade_frame.pack(side=\"right\", padx=10, pady=10)\r\ngrade_frame.pack_propagate(False)\r\n\r\nresult_frame = tk.Frame(root, width=int(screen_width / 4), height=left_frame[\"height\"], borderwidth=4, relief=\"ridge\")\r\nresult_frame.pack(side=\"right\", padx=10, pady=10)\r\nresult_frame.pack_propagate(False)\r\n\r\ngradeText = tk.Text(grade_frame, height=100,\r\n width=80,\r\n bg=\"light yellow\")\r\n\r\nresultText = tk.Text(result_frame, height=100,\r\n width=200,\r\n bg=\"light cyan\")\r\n\r\ngradeLabel = tk.Label(grade_frame, text=\"Ocena modelu\")\r\ngradeLabel.pack()\r\ngradeText.pack()\r\nresultLabel = tk.Label(result_frame, text=\"Wynik dla rekordu\")\r\nresultLabel.pack()\r\nresultText.pack()\r\n\r\n\r\n# end of window initialize\r\n# =======================================================================================================================\r\n\r\n# data display functions\r\n# =======================================================================================================================\r\ndef fetch_data():\r\n try:\r\n myconn = sqlite3.connect('wines_dominik_nykiel')\r\n mycursor = myconn.cursor()\r\n mycursor.execute(\"SELECT * FROM DataTable\")\r\n result = mycursor.fetchall()\r\n return result\r\n except sqlite3.Error as exc:\r\n print(f\"Error: {exc}\")\r\n finally:\r\n if mycursor:\r\n mycursor.close()\r\n if myconn:\r\n myconn.close()\r\n\r\n\r\ndef displaydatawindow(dataToModify):\r\n displaywindow = tk.Toplevel(root)\r\n treeview = ttk.Treeview(displaywindow)\r\n treeview[\"columns\"] = headers\r\n treeview.column(\"#0\", width=0)\r\n\r\n for i in range(0, len(headers)):\r\n treeview.heading(headers[i], text=headers[i])\r\n treeview.pack(fill='x')\r\n\r\n h = ttk.Scrollbar(displaywindow, orient='horizontal', command=treeview.xview)\r\n h.pack(side='bottom', fill='x')\r\n treeview.configure(xscrollcommand=h.set)\r\n\r\n def load_data():\r\n data = fetch_data()\r\n treeview.delete(*treeview.get_children())\r\n for row in data:\r\n treeview.insert(\"\", \"end\", values=(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8],\r\n row[9], row[10], row[11], row[12], row[13]))\r\n\r\n def open_insert_window():\r\n\r\n new_window = tk.Toplevel(displaywindow)\r\n new_window.title(\"Dodaj nowy rekord\")\r\n\r\n type_label = ttk.Label(new_window, text=\"TypeOf\")\r\n type_label.pack()\r\n type_entry = ttk.Entry(new_window)\r\n type_entry.pack()\r\n\r\n alcohol_label = ttk.Label(new_window, text=\"Alcohol\")\r\n alcohol_label.pack()\r\n alcohol_entry = ttk.Entry(new_window)\r\n alcohol_entry.pack()\r\n\r\n acid_label = ttk.Label(new_window, text=\"Malic acid\")\r\n acid_label.pack()\r\n acid_entry = ttk.Entry(new_window)\r\n acid_entry.pack()\r\n\r\n ash_label = ttk.Label(new_window, text=\"Ash\")\r\n ash_label.pack()\r\n ash_entry = ttk.Entry(new_window)\r\n ash_entry.pack()\r\n\r\n alcaline_label = ttk.Label(new_window, text=\"Alcalinity of ash\")\r\n alcaline_label.pack()\r\n alcaline_entry = ttk.Entry(new_window)\r\n alcaline_entry.pack()\r\n\r\n magnesium_label = ttk.Label(new_window, text=\"Magnesium\")\r\n magnesium_label.pack()\r\n magnesium_entry = ttk.Entry(new_window)\r\n magnesium_entry.pack()\r\n\r\n phenols_label = ttk.Label(new_window, text=\"Total_phenols\")\r\n phenols_label.pack()\r\n phenols_entry = ttk.Entry(new_window)\r\n phenols_entry.pack()\r\n\r\n flavanoid_label = ttk.Label(new_window, text=\"Flavanoids\")\r\n 
flavanoid_label.pack()\r\n flavanoid_entry = ttk.Entry(new_window)\r\n flavanoid_entry.pack()\r\n\r\n nonflavanoid_label = ttk.Label(new_window, text=\"Nonflavanoid phenols\")\r\n nonflavanoid_label.pack()\r\n nonflavanoid_entry = ttk.Entry(new_window)\r\n nonflavanoid_entry.pack()\r\n\r\n proanth_label = ttk.Label(new_window, text=\"Proanthocyanins\")\r\n proanth_label.pack()\r\n proanth_entry = ttk.Entry(new_window)\r\n proanth_entry.pack()\r\n\r\n color_label = ttk.Label(new_window, text=\"Color_intensity\")\r\n color_label.pack()\r\n color_entry = ttk.Entry(new_window)\r\n color_entry.pack()\r\n\r\n hue_label = ttk.Label(new_window, text=\"Hue\")\r\n hue_label.pack()\r\n hue_entry = ttk.Entry(new_window)\r\n hue_entry.pack()\r\n\r\n dilute_label = ttk.Label(new_window, text=\"OD280_OD315_of_diluted_wines\")\r\n dilute_label.pack()\r\n dilute_entry = ttk.Entry(new_window)\r\n dilute_entry.pack()\r\n\r\n proline_label = ttk.Label(new_window, text=\"Proline\")\r\n proline_label.pack()\r\n proline_entry = ttk.Entry(new_window)\r\n proline_entry.pack()\r\n\r\n def add_new():\r\n\r\n new_type = type_entry.get()\r\n new_alcohol = alcohol_entry.get()\r\n new_acid = acid_entry.get()\r\n new_ash = ash_entry.get()\r\n new_alcaline = alcaline_entry.get()\r\n new_magnesium = magnesium_entry.get()\r\n new_phenols = phenols_entry.get()\r\n new_flavanoid = flavanoid_entry.get()\r\n new_nonflava = nonflavanoid_entry.get()\r\n new_proanth = proanth_entry.get()\r\n new_color = color_entry.get()\r\n new_hue = hue_entry.get()\r\n new_dilute = dilute_entry.get()\r\n new_proline = proline_entry.get()\r\n try:\r\n connfunc = sqlite3.connect('wines_dominik_nykiel')\r\n cursorfunc = connfunc.cursor()\r\n sql = \"INSERT INTO DataTable (TypeOf, Alcohol, Malic_acid, Ash ,Alcalinity_of_ash ,Magnesium, Total_phenols, Flavanoids, Nonflavanoid_phenols, Proanthocyanins, Color_intensity, Hue, OD280_OD315_of_diluted_wines, Proline) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\r\n params = (new_type, new_alcohol, new_acid, new_ash, new_alcaline, new_magnesium, new_phenols,\r\n new_flavanoid, new_nonflava, new_proanth, new_color, new_hue, new_dilute, new_proline)\r\n cursorfunc.execute(sql, params)\r\n connfunc.commit()\r\n except sqlite3.Error as e:\r\n print(f\"Error: {e}\")\r\n finally:\r\n if cursorfunc:\r\n cursorfunc.close()\r\n if connfunc:\r\n connfunc.close()\r\n\r\n load_data()\r\n new_window.destroy()\r\n\r\n update_button = tk.Button(new_window, text=\"Dodaj rekord\", command=add_new)\r\n update_button.pack()\r\n\r\n def save_data(dataToSave):\r\n dataToSave = pd.DataFrame(fetch_data(), columns=headers)\r\n print(dataToSave.shape)\r\n\r\n add_button = tk.Button(displaywindow, text=\"Dodaj nowy rekord\", command=open_insert_window)\r\n add_button.pack(side='left')\r\n\r\n save_button = tk.Button(displaywindow, text=\"Zapisz rekord\", command=lambda: save_data(dataToModify))\r\n save_button.pack(side='left')\r\n\r\n load_data()\r\n\r\n\r\ndef show_plot(data):\r\n new_window = tk.Toplevel(root)\r\n\r\n figure = Figure(figsize=(6, 6))\r\n ax = figure.subplots()\r\n\r\n sbs.scatterplot(x=data['Alcohol'], y=data['Flavanoids'], hue=data['TypeOf'], ax=ax)\r\n\r\n canvas = FigureCanvasTkAgg(figure, master=new_window)\r\n\r\n canvas.draw()\r\n\r\n canvas.get_tk_widget().pack()\r\n\r\n\r\n toolbar = NavigationToolbar2Tk(canvas,\r\n new_window)\r\n toolbar.update()\r\n\r\n canvas.get_tk_widget().pack()\r\n\r\n\r\n# end of display functions\r\n# 
=======================================================================================================================\r\n\r\n# actual model functions\r\n# =======================================================================================================================\r\n\r\ndef trainnewmodel(currentModel, testset_X, testset_Y, trainset_X, trainset_Y):\r\n trainset_X, testset_X, trainset_Y, testset_Y = train_test_split(X, y, test_size=0.25, random_state=2023)\r\n print(testset_X)\r\n model = KNeighborsClassifier(n_neighbors=5)\r\n print(trainset_X.shape)\r\n print(trainset_Y.shape)\r\n model.fit(trainset_X, trainset_Y)\r\n param_grid = {\r\n 'n_neighbors': list(range(1, 21)),\r\n 'metric': ['euclidean', 'manhattan']\r\n }\r\n grid_search = GridSearchCV(model, param_grid, cv=KFold(n_splits=5, random_state=2023, shuffle=True))\r\n grid_search.fit(trainset_X, trainset_Y)\r\n currentModel = grid_search.best_estimator_\r\n print(currentModel)\r\n dump(grid_search.best_estimator_, modelFile)\r\n\r\n\r\ndef test_new_model(testset_X, testset_Y, trainset_X, trainset_Y, kNN_model):\r\n gradeText.delete(\"1.0\", \"end\")\r\n\r\n best_predict = kNN_model.predict(testset_X)\r\n\r\n gradeText.insert(tk.END, f\"Dokładność modelu na zbiorze testowym: {accuracy_score(testset_Y, best_predict)} \\n\")\r\n\r\n best_predict_train = kNN_model.predict(trainset_X)\r\n\r\n gradeText.insert(tk.END,\r\n f\"Dokładność modelu na zbiorze treningowym: {accuracy_score(trainset_Y, best_predict_train)} \\n\")\r\n\r\n cm_train = confusion_matrix(trainset_Y, best_predict_train)\r\n gradeText.insert(tk.END, f\"Macierz pomyłek dla zbioru treningowego:\\n {cm_train} \\n\")\r\n\r\n report = classification_report(trainset_Y, best_predict_train)\r\n gradeText.insert(tk.END, report)\r\n\r\n\r\ndef test_new_recordwindow(model):\r\n new_window = tk.Toplevel(root)\r\n new_window.title(\"Przetestuj rekord\")\r\n\r\n alcohol_label = ttk.Label(new_window, text=\"Alcohol\")\r\n alcohol_label.pack()\r\n alcohol_entry = ttk.Entry(new_window)\r\n alcohol_entry.pack()\r\n\r\n acid_label = ttk.Label(new_window, text=\"Malic acid\")\r\n acid_label.pack()\r\n acid_entry = ttk.Entry(new_window)\r\n acid_entry.pack()\r\n\r\n ash_label = ttk.Label(new_window, text=\"Ash\")\r\n ash_label.pack()\r\n ash_entry = ttk.Entry(new_window)\r\n ash_entry.pack()\r\n\r\n alcaline_label = ttk.Label(new_window, text=\"Alcalinity of ash\")\r\n alcaline_label.pack()\r\n alcaline_entry = ttk.Entry(new_window)\r\n alcaline_entry.pack()\r\n\r\n magnesium_label = ttk.Label(new_window, text=\"Magnesium\")\r\n magnesium_label.pack()\r\n magnesium_entry = ttk.Entry(new_window)\r\n magnesium_entry.pack()\r\n\r\n phenols_label = ttk.Label(new_window, text=\"Total_phenols\")\r\n phenols_label.pack()\r\n phenols_entry = ttk.Entry(new_window)\r\n phenols_entry.pack()\r\n\r\n flavanoid_label = ttk.Label(new_window, text=\"Flavanoids\")\r\n flavanoid_label.pack()\r\n flavanoid_entry = ttk.Entry(new_window)\r\n flavanoid_entry.pack()\r\n\r\n nonflavanoid_label = ttk.Label(new_window, text=\"Nonflavanoid phenols\")\r\n nonflavanoid_label.pack()\r\n nonflavanoid_entry = ttk.Entry(new_window)\r\n nonflavanoid_entry.pack()\r\n\r\n proanth_label = ttk.Label(new_window, text=\"Proanthocyanins\")\r\n proanth_label.pack()\r\n proanth_entry = ttk.Entry(new_window)\r\n proanth_entry.pack()\r\n\r\n color_label = ttk.Label(new_window, text=\"Color_intensity\")\r\n color_label.pack()\r\n color_entry = ttk.Entry(new_window)\r\n color_entry.pack()\r\n\r\n hue_label = 
ttk.Label(new_window, text=\"Hue\")\r\n hue_label.pack()\r\n hue_entry = ttk.Entry(new_window)\r\n hue_entry.pack()\r\n\r\n dilute_label = ttk.Label(new_window, text=\"OD280_OD315_of_diluted_wines\")\r\n dilute_label.pack()\r\n dilute_entry = ttk.Entry(new_window)\r\n dilute_entry.pack()\r\n\r\n proline_label = ttk.Label(new_window, text=\"Proline\")\r\n proline_label.pack()\r\n proline_entry = ttk.Entry(new_window)\r\n proline_entry.pack()\r\n\r\n def test_new_record(currentmodel):\r\n new_alcohol = alcohol_entry.get()\r\n new_acid = acid_entry.get()\r\n new_ash = ash_entry.get()\r\n new_alcaline = alcaline_entry.get()\r\n new_magnesium = magnesium_entry.get()\r\n new_phenols = phenols_entry.get()\r\n new_flavanoid = flavanoid_entry.get()\r\n new_nonflava = nonflavanoid_entry.get()\r\n new_proanth = proanth_entry.get()\r\n new_color = color_entry.get()\r\n new_hue = hue_entry.get()\r\n new_dilute = dilute_entry.get()\r\n new_proline = proline_entry.get()\r\n newRecord = [[float(new_alcohol), float(new_acid), float(new_ash), float(new_alcaline), float(new_magnesium),\r\n float(new_phenols), float(new_flavanoid),\r\n float(new_nonflava), float(new_proanth), float(new_color), float(new_hue), float(new_dilute),\r\n float(new_proline)]]\r\n resultText.insert(tk.END, f\"Wynik dla rekordu {newRecord}: \\n {currentmodel.predict(newRecord)} \\n\")\r\n new_window.destroy()\r\n\r\n make_predict = tk.Button(new_window, text=\"Klasyfikuj!\", command=lambda: test_new_record(model))\r\n make_predict.pack()\r\n\r\n\r\n# end of model functions\r\n# =======================================================================================================================\r\n\r\n\r\ndata_button = tk.Button(left_frame, text=\"Pokaż dane\", command=lambda: displaydatawindow(pandaData))\r\ndata_button.pack(anchor=\"w\", padx=10, pady=10)\r\n\r\ntest_button = tk.Button(left_frame, text=\"Testuj model\",\r\n command=lambda: test_new_model(X_test, y_test, X_train, y_train, knn))\r\ntest_button.pack(anchor=\"w\", padx=10, pady=10)\r\n\r\ntrain_button = tk.Button(left_frame, text=\"Wytrenuj nowy model\",\r\n command=lambda: trainnewmodel(knn, X_test, y_test, X_train, y_train))\r\ntrain_button.pack(anchor=\"w\", padx=10, pady=10)\r\n\r\npredict_button = tk.Button(left_frame, text=\"Wprowadz rekord do klasyfikacji\",\r\n command=lambda: test_new_recordwindow(knn))\r\npredict_button.pack(anchor=\"w\", padx=10, pady=10)\r\n\r\nplot_button = tk.Button(left_frame, text=\"Pokaż wykres danych\", command=lambda: show_plot(pandaData))\r\nplot_button.pack(anchor=\"w\", padx=10, pady=10)\r\nroot.mainloop()\r\n","repo_name":"DominikNykiel/PPYProjektZaliczeniowy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38246514456","text":"from itertools import starmap\nfrom operator import mul\n\n\ndef matvecmul(A, b):\n return [sum(starmap(mul, zip(b, col))) for col in zip(*A)]\n\n\n# нулевая матрица\ndef zeromatrix(rows, cols):\n return [[0 for x in range(cols)] for y in range(rows)]\n\n\ndef zerovector(size):\n if size <= 0:\n raise Exception(\"size {} must be > 0\\n\".format(size))\n\n return [0 for x in range(size)]\n\n\ndef onesvector(size):\n if size <= 0:\n raise Exception(\"size {} must be > 0\\n\".format(size))\n\n return [1 for x in range(size)]\n\n\ndef constvector(size, const=1):\n if size <= 0:\n raise Exception(\"size {} must be > 0\\n\".format(size))\n\n return [const for x in 
range(size)]\n\n\ndef onehotvec(size, index):\n if size <= 0:\n raise Exception(\"size {} must be > 0\\n\".format(size))\n\n res = zerovector(size)\n for i in range(size):\n if i == index:\n res[i] = 1\n break\n\n return res\n\n\ndef identity_matrix(rows, cols):\n if rows != cols:\n raise Exception(\"Identity matrix must be squared\\n\")\n\n I = zeromatrix(rows, cols)\n for i in range(len(I)):\n I[i][i] = 1\n\n return I\n\n\n# 2D matrices only\ndef dim(A):\n return [len(A), len(A[0])]\n\ndef vecvecadd(a, b):\n if type(a) is not list or type(b) is not list:\n raise Exception(\"{} and {} must be represented as lists\".format(a, b))\n if len(a) != len(b):\n raise Exception(\"{} and {} must have the same size\".format(a, b))\n\n res = zerovector(len(a))\n\n for i in range(len(res)):\n res[i] = a[i] + b[i]\n\n return res\n\n\ndef vecvecsub(a, b):\n if type(a) is not list or type(b) is not list:\n raise Exception(\"{} and {} must be represented as lists\".format(a, b))\n\n if len(a) != len(b):\n raise Exception(\"{} and {} must have the same size\".format(a, b))\n\n res = zerovector(len(a))\n\n for i in range(len(res)):\n res[i] = a[i] - b[i]\n\n return res\n\n\n\n# matrix multiplication\ndef matmatmul(A, B):\n # dimension checking\n if dim(A) == dim(B):\n res = zeromatrix(len(A), len(A))\n else:\n res = zeromatrix(len(A), len(B[0]))\n\n\n for i in range(len(A)):\n for j in range(len(B[0])):\n sum = 0\n for k in range(len(A[0])):\n sum += A[i][k] * B[k][j]\n\n res[i][j] = sum\n\n return res\n\n\ndef matmatadd(A, B):\n if dim(A) != dim(B):\n raise Exception(\"different size of {} and {}\".format(A, B))\n\n res = zeromatrix(len(A), len(A[0]))\n\n for i in range(len(res)):\n for j in range(len(res[0])):\n res[i][j] = A[i][j] + B[i][j]\n\n return res\n\ndef matmatsub(A, B):\n if dim(A) != dim(B):\n raise Exception(\"different size of {} and {}\".format(A, B))\n\n res = zeromatrix(len(A), len(A[0]))\n\n for i in range(len(res)):\n for j in range(len(res[0])):\n res[i][j] = A[i][j] - B[i][j]\n\n return res\n\n\ndef pure_print(matr, name=\"Matrix\", clear=False):\n\n if type(matr[0]) is not list: # if a 1D vector\n print('-' * 5, name, '-' * 5)\n print(str(matr).replace('[', '(').replace(']', ')'))\n\n else: # if a 2D matrix\n print('-' * 5, name, '-' * 5, '\\n')\n for row in range(len(matr)):\n print('\\t', end='')\n for col in range(len(matr[0])):\n\n print( \"{} \".format(\n (matr[row][col]) if not clear\n else (round_float(matr[row][col])) ), end='')\n\n print(\"\\n\")\n print('-'*(12+len(name))) # 10 + 2 spaces around a name\n\n\ndef round_float(num, limit=2):\n return float((\"%.\" + str(limit) + \"f\") % num)\n\n\ndef round_matrix(matr, limit=2):\n for i in range(len(matr)):\n for j in range(len(matr[0])):\n matr[i][j] = round_float(matr[i][j])\n\n return matr\n\n\ndef matrix_norm(matr, p=\"cheb\", clear=True):\n\n if type(matr) is not list or type(matr[0]) is not list:\n raise Exception(\"{} must be matrix (repr. 
as list of lists)\\n\".format(matr))\r\n\r\n    n_rows = len(matr)\r\n    n_cols = len(matr[0])\r\n\r\n    res_norm = None\r\n\r\n    if p is not None and p.isdigit():  # this entry-wise norm covers the Frobenius (Euclidean) norm at p=2\r\n        p = float(p)  # if p was a numeric string\r\n        s = 0\r\n\r\n        for i in range(n_rows):\r\n            for j in range(n_cols):\r\n                s += pow(abs(matr[i][j]), p)\r\n\r\n        res_norm = pow(s, 1/p)\r\n\r\n    elif p == \"maxnorm\" or p == \"max\":  # largest entry\r\n        res_norm = matr[0][0]\r\n\r\n        for i in range(n_rows):\r\n            for j in range(n_cols):\r\n                if matr[i][j] > res_norm:\r\n                    res_norm = matr[i][j]\r\n\r\n    elif p == \"one\":  # maximum absolute column sum\r\n\r\n        res_norm = 0\r\n\r\n        for i in range(n_cols):\r\n            s = 0\r\n            for j in range(n_rows):\r\n                s += abs(matr[j][i])\r\n            if s > res_norm:\r\n                res_norm = s\r\n\r\n    elif p == \"cheb\":  # maximum absolute row sum\r\n\r\n        res_norm = 0\r\n\r\n        for i in range(n_rows):\r\n            s = 0\r\n            for j in range(n_cols):\r\n                s += abs(matr[i][j])\r\n            if s > res_norm:\r\n                res_norm = s\r\n\r\n    else:\r\n        raise Exception(\"I cannot realize this norm, sorry\\n\")\r\n\r\n    return round_float(res_norm) if clear else res_norm\r\n\r\n\r\ndef vecnorm(a, norm=\"cheb\"):\r\n    # only the max (Chebyshev) vector norm is implemented; `norm` is ignored\r\n    max = 0\r\n\r\n    for i in range(len(a)):\r\n        if abs(a[i]) > max:\r\n            max = abs(a[i])\r\n\r\n    return max\r\n\r\n\r\nA = [\r\n    [1, 2],\r\n    [3, 4]\r\n]\r\n\r\n\r\nU0 = [\r\n    [0.6, -0.5],\r\n    [0.1, 0.6]\r\n]\r\n\r\n\r\ndef is_square(matr):\r\n    if len(matr) != len(matr[0]):\r\n        return False\r\n\r\n    return True\r\n\r\n\r\ndef schultz_method(A, U0, eps=1e-5, m=1, n_iterations=20, clear=False):\r\n\r\n    if not is_square(A):\r\n        raise Exception(\"Only a square matrix can be inverted\\n\")\r\n\r\n    rows = len(A)\r\n    cols = len(A[0])\r\n\r\n    U = U0\r\n\r\n    for i in range(n_iterations):\r\n        k = i\r\n        PHI = matmatsub(identity_matrix(rows, cols), matmatmul(A, U))\r\n\r\n        if matrix_norm(PHI) <= eps:\r\n            break  # we found the inverse matrix (to tolerance eps)\r\n\r\n        U = matmatmul(U, matmatadd(identity_matrix(rows, cols), PHI))\r\n\r\n        if matrix_norm(PHI) >= 1:\r\n            break  # the iteration diverges once the residual norm reaches 1\r\n\r\n    return U if not clear else round_matrix(U)\r\n","repo_name":"telaust/computational_methods","sub_path":"schultz_method/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8075902341","text":"from scipy.spatial import Voronoi, voronoi_plot_2d\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(5)\n\npoints = np.random.uniform(0, 1, (10, 2))\n\nvor = Voronoi(points)\n\nadyacencias = []\nbarreras = []\n\nfor key, vals in vor.ridge_dict.items():\n    if -1 not in vals:\n        adyacencias.append(key)\n        barreras.append(vals)\n\nprint(adyacencias)\nprint(barreras)\n\nfig = voronoi_plot_2d(vor)\nplt.show()\n\ndef recorta_barrera(barrera):\n    \"\"\"Input: a list of two vertex indices.\"\"\"\n\n    contador = 0\n\n    tupla = ()\n    for key, values in vor.ridge_dict.items():\n        contador += 1\n        if values[0] == barrera[0] and values[1] == barrera[1]:\n            tupla = key\n\n    if not tupla:  # no matching ridge found\n        P1 = [0, 0]\n        P2 = [0, 0]\n\n        return P1, P2\n\n    PB1 = vor.vertices[barrera[0]]\n    PB2 = vor.vertices[barrera[1]]\n\n    dB12 = PB2 - PB1\n    mod_dB12 = np.linalg.norm(dB12)\n\n    dB12_u = dB12/mod_dB12\n\n    PN1 = vor.points[tupla[0]]\n    PN2 = vor.points[tupla[1]]\n\n    M = (PN1 + PN2) / 2\n\n    dM1 = M - PB1\n    mod_dM1 = np.linalg.norm(dM1)\n\n    landa = mod_dM1/mod_dB12\n\n    deviation = min(landa, 1-landa)\n\n    P1 = M + dB12*deviation\n    P2 = M - dB12*deviation\n\n    return P1, P2\n\ndef genera_separacion(adyacencia):\n    # dr = np.array(punto2)-np.array(punto1)\n\n    # dr_u = dr/np.sqrt(np.sum(dr**2))\n\n    barrera = vor.ridge_dict[adyacencia]\n    # contador = 0\n    #\n    # tupla = ()\n    # for key, values in vor.ridge_dict.items():\n    #     contador += 1\n    #     if
values[0] == barrera[0] and values[1] == barrera[1]:\n # tupla = key\n #\n # if contador > len(vor.ridge_dict)+1:\n # P1 = [0, 0]\n # P2 = [0, 0]\n #\n # return P1, P2\n \n PB1 = vor.vertices[barrera[0]]\n PB2 = vor.vertices[barrera[1]]\n \n dB12 = PB2 - PB1\n mod_dB12 = np.linalg.norm(dB12)\n \n dB12_u = dB12/mod_dB12\n \n PN1 = vor.points[adyacencia[0]]\n PN2 = vor.points[adyacencia[1]]\n \n M = (PN1 + PN2) / 2\n \n dM1 = M - PB1\n mod_dM1 = np.linalg.norm(dM1)\n \n landa = mod_dM1/mod_dB12\n \n # alpha = np.random.rand()\n # beta = np.random.rand()\n \n alpha = 0.1\n beta = 0.1\n \n deviation = min(landa, 1-landa)\n \n P1 = M + dB12*deviation\n P2 = M - dB12*deviation\n \n return P1, P2\n\n\nfig, ax = plt.subplots()\n\nplt.axis([0, 1, 0, 1])\n\nfor i in points:\n plt.scatter(i[0], i[1], s = 5, c = 'black')\n\n# plt.scatter(points[:, 0], points[:, 1], s = 1)\n\n\nfor a in adyacencias:\n P1, P2 = genera_separacion(a)\n ax.plot([P1[0], P2[0]], [P1[1], P2[1]], '-', color = 'red')\n # ax.annotate(str(b), xy = (P1[0], P1[1]))\n \n # PB1 = vor.vertices[b[0]]\n # PB2 = vor.vertices[b[1]]\n #\n # ax.plot([PB1[0], PB2[0]], [PB1[1], PB2[1]], '-', color = 'red')\n \n \n\nplt.show()\n\n \n \n \n \n# import matplotlib.pyplot as plt\n#\n# fig, ax = plt.subplots()\n#\n# # plt.axis([0, 10, 0, 10])\n# fig = voronoi_plot_2d(vor, ax = ax)\n#\n# ax.set_xlim(left = -2, right = 2)\n# ax.set_ylim(bottom = -2, top = 2)\n#\n# plt.show()\n\n","repo_name":"z72vamac/Routes_Barriers","sub_path":"voronoid.py","file_name":"voronoid.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72398020533","text":"def func(n):\n letters = 'abcdefghijklmnopqrstuvwxyz'\n for i in range(n):\n ans = ''\n k = list(map(int,input().split()))\n length = k[0]\n unique = letters[:k[2]]\n # lenght_substring = k[1]\n ans += unique * (length//len(unique))\n i = 0\n while len(ans)!=length:\n ans += unique[i]\n i += 1\n print(ans)\nn = int(input())\nfunc(n)","repo_name":"bijeshofficial/coding_solutions","sub_path":"CodeForces/1335B.Construct_The_String.py","file_name":"1335B.Construct_The_String.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13791232273","text":"import numpy as np\nfrom kudzunn.data import Data\n\n\ndef test_data_length():\n x = np.zeros(10)\n y = np.zeros(10)\n d = Data(x, y)\n assert len(d) == 10\n\n\ndef test_data_getitem():\n x = np.arange(10, dtype=int)\n y = np.arange(10, dtype=int)\n d = Data(x, y)\n for i in range(10):\n assert d[i] == (i, i)\n","repo_name":"rahuldave/kudzunn","sub_path":"kudzunn/tests/data_test.py","file_name":"data_test.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37218130321","text":"class Solution:\n def rearrangeBarcodes(self, barcodes: 'List[int]') -> 'List[int]':\n hmp = {}\n for b in barcodes:\n if b not in hmp:\n hmp[b] = []\n hmp[b].append(b)\n pool = sorted(hmp.values(), key =lambda x: len(x), reverse = True)\n temp = []\n for p in pool:\n for c in p:\n temp.append(c)\n i = temp[:len(temp)//2]\n j = temp[len(temp)//2:]\n output = []\n while i:\n output.append(i.pop())\n if j :\n output.append(j.pop())\n if i:\n output.append(i.pop())\n if j:\n output.append(j.pop())\n return 
output","repo_name":"renjieliu/leetcode","sub_path":"1001_1499/1054.py","file_name":"1054.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
{"seq_id":"14086995083","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom google.appengine.ext import ndb\n\n\nclass ToDoList(ndb.Model):\n    user = ndb.UserProperty(required=True)\n    task = ndb.StringProperty(required=True)\n    created = ndb.DateTimeProperty(auto_now_add=True)\n    is_done = ndb.BooleanProperty()","repo_name":"DimaSavkov/todo-list","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"23587402501","text":"texto = input(\"Enter a quote you like: \").lower()\n\nletra1 = input(\"Give me a 1st letter of your choice: \").lower()\nletra2 = input(\"Give me a 2nd letter of your choice: \").lower()\nletra3 = input(\"Give me a 3rd letter of your choice: \").lower()\n\nsolucion1 = texto.count(letra1)\nsolucion2 = texto.count(letra2)\nsolucion3 = texto.count(letra3)\n\nprint(f\"The letter '{letra1}' appears {solucion1} times in your text. \")\nprint(f\"The letter '{letra2}' appears {solucion2} times in your text. \")\nprint(f\"The letter '{letra3}' appears {solucion3} times in your text. \")\n\npalabras = texto.split()\nprint(f\"Your text has a total of {len(palabras)} words! What do you think? \")\nletra_inicial = texto[0]\n\nprint(f\"The first letter of your text is: '{letra_inicial}' \")\n\nletra_final = texto[-1]\nprint(f\"The last letter of your text is: '{letra_final}' \")\n\ntexto_al_reves = texto[::-1]\nprint(f\"Your text backwards looks like this: '{texto_al_reves}' \")\n\nbuscar_python = \"python\" in texto  # the text was lower-cased above, so search in lower case\ndic = {True: 'does', False: 'does not'}\nprint(f\"The word 'Python' {dic[buscar_python]} appear in your text. 
\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"GemmaOro/textAnalyzer","sub_path":"analizador_de_texto.py","file_name":"analizador_de_texto.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18327782777","text":"#!/usr/bin/env python3\n\nfrom block import *\nimport base64\nimport re\nimport logging\nfrom manager import *\nfrom block import *\n\nlog_level = logging.INFO\nlogging.basicConfig(level = log_level)\nlogger = logging.getLogger(__name__)\nlogger.level = log_level\n\nclass Partitioner:\n def __init__(self, author, files):\n logger.info(\"[PARTITIONER] - Initializing\")\n self.files = files\n self.author = author\n\n def get_code_elements(self):\n fileblocks=[]\n for fil in self.files:\n if not fil.patch: #its a blob \n continue\n fileblocks+=DiffParser.parse(self.author,fil.patch)\n return fileblocks\nregex=\"@@ -(\\d+),(\\d+) \\+(\\d+),(\\d+) @@\"\nignoreregex=\"-(.*)\"\nmylineregex=\"\\+(.*)\"\nclass DiffParser:\n\n @staticmethod\n def parse(author,diff):\n blocks=[]\n wantedStuff=[]\n difftuple=diff.split(\"\\n\")[4:]\n for line in difftuple:\n diffsectionstart=re.match(r\"\"+regex,line)\n nomatch=re.match(r\"\"+ignoreregex,line)\n myline=re.match(r\"\"+mylineregex,line)\n if nomatch:\n continue\n elif diffsectionstart:\n print(\"Making block\")\n if not wantedStuff:\n continue\n blocks+=[Block(wantedStuff)]\n wantedStuff=[]\n elif not diffsectionstart and not nomatch:\n if myline:\n wantedStuff+=[Line(author,0,myline.group(1))]\n else:\n wantedStuff+=[Line(\"\",0,line)]\n logger.info(\"It matched\")\n else:\n logger.info(\"Da fak?\")\n blocks+=[Block(wantedStuff)]\n return blocks\n #self.old_start_line=match.group(1)\n #self.old_line_size=match.group(2)\n #self.new_start_line=match.group(3)\n #self.new_line_size=match.group(4)\n","repo_name":"Gisson/rosmaninho_natural_laranjas_wth","sub_path":"src/partitioner.py","file_name":"partitioner.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35245445722","text":"from collections import deque\nimport numpy as np\nimport heapq\n\ndef solution(queue):\n answer = []\n h= []\n myq = deque()\n h.append((0, queue, [0,0,0]))\n\n q = deque(queue)\n while h:\n (s, numbers, cost) = heapq.heappop(h)\n \n if numbers.count(1) + cost[0] == numbers.count(2) + cost[1] == numbers.count(3) + cost[2]:\n answer = list(cost)\n break\n\n ar_1 = np.add(cost, [1,0,0])\n ar_2 = np.add(cost, [0,1,0])\n ar_3 = np.add(cost, [0,0,1])\n heapq.heappush(h, (sum(ar_1), numbers[1:], list(map(int, ar_1)) ))\n heapq.heappush(h, (sum(ar_2), numbers[1:], list(map(int, ar_2)) ))\n heapq.heappush(h, (sum(ar_3), numbers[1:], list(map(int, ar_3)) ))\n \n # myq.append((numbers[1:], list(map(int, np.add(cost, [1,0,0]))) ))\n # myq.append((numbers[1:], list(map(int, np.add(cost, [0,1,0]))) ))\n # myq.append((numbers[1:], list(map(int, np.add(cost, [0,0,1]))) ))\n \n \n return answer\n\nprint(solution([2,1,3,1,2,1]))\nprint(solution([3,3,3,3,3,3]))\nprint(solution([1,2,3]))","repo_name":"lyh951212/algorithm","sub_path":"est/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26722500655","text":"from datetime import datetime\nimport requests\nfrom django.conf import settings\nfrom django.shortcuts import render, redirect\n\n# Create your 
views here.\nfrom post.models import Job, MyHistory, Email\n\n\ndef set_main(request):\n    if request.method == 'POST':\n        name = request.POST['name']\n        email = request.POST['email']\n        message = request.POST['message']\n        \n        order = f'Dear Nursultan new message for you \\nName: {name}\\nEmail: {email}\\nMessage: {message}\\nДата отправки: {datetime.now()}'\n        base_url = f'https://api.telegram.org/bot5421110622:AAGTrih1SWDeaAEnn2S6erFwFu7Q1hPob5s/sendMessage?chat_id=-716220787&text={order}'\n        requests.get(base_url)\n\n        # redirect() only builds the response; it must be returned or it is a no-op\n        return redirect('home')\n\n    job = Job.objects.all()\n    his = MyHistory.objects.all()\n    con = {\n        'jobs': job,\n        'histories': his,\n        # 'first': MyHistory.objects.filter(date__lte='2014-01-01').last()\n        'first': MyHistory.objects.all().filter(date__lte='2014-01-01')\n    }\n\n    return render(request, \"index.html\", con)\n","repo_name":"NURSLORD/thnurs27","sub_path":"post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31697206427","text":"from ib_insync import *\nfrom os import listdir, remove\nfrom time import sleep\nimport pickle\nimport pandas as pd\nimport datetime\nfrom helper_functions import *\n\n# Define your variables here ###########################################################################################\nsampling_rate = 1 # How often, in seconds, to check for inputs from Dash?\n# For TWS Paper account, default port is 7497\n# For IBG Paper account, default port is 4002\nport = 7497\n# choose your master id. Mine is 10645. You can use whatever you want, just set it in API Settings within TWS or IBG.\nmaster_client_id = 10645\n# choose your dedicated id just for orders. I picked 1111.\norders_client_id = 1111\n# account number: you'll need to fill in yourself. The below is one of my paper trader account numbers.\nacc_number = 'DU3526916'\n########################################################################################################################\n\n# Run your helper function to clear out any io files left over from old runs\ncheck_for_and_del_io_files()\n\n# Create an IB app; i.e., an instance of the IB() class from the ib_insync package\nib = IB()\n# Connect your app to a running instance of IBG or TWS\nib.connect(host='127.0.0.1', port=port, clientId=master_client_id)\n\n# Make sure you're connected -- stay in this while loop until ib.isConnected() is True.\nwhile not ib.isConnected():\n    sleep(.01)\n\n# If connected, script proceeds and prints a success message.\nprint('Connection Successful!')\n\ncontracts = [Stock('IVV', 'SMART', 'USD'),\n             Stock('QQQ', 'SMART', 'USD'),\n             Stock('URTH', 'SMART', 'USD'),\n             Stock('DIA', 'SMART', 'USD')]\n\nall_tickers = ["IVV", "QQQ", "URTH", "DJ"]\n# Main while loop of the app. 
Stay in this loop until the app is stopped by the user.\nwhile True:\n if 'training_data.txt' in listdir():\n f = open('training_data.txt', 'r')\n date = f.readline()\n dates = date.split(\"-\")\n f.close()\n for i in range(len(contracts)):\n bars = ib.reqHistoricalData(\n contracts[i],\n endDateTime=datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2])), durationStr='5 Y', barSizeSetting='1 day', whatToShow='TRADES', useRTH=True\n )\n df = pd.DataFrame(bars)\n df.volume = df.volume * 100\n df.to_csv('data/' + all_tickers[i] + \"_\" + date + \".csv\")\n # If the app finds a file named 'tickers.txt' in the current directory, enter this code block.\n if 'ticker_n.txt' in listdir():\n # Code goes here...\n f = open('ticker_n.txt', 'r')\n # Will need to read all tickers\n n = f.readline()\n f.close()\n bars = ib.reqHistoricalData(\n contracts[0],\n endDateTime='', durationStr=n+' D', barSizeSetting='1 day', whatToShow='TRADES', useRTH=True\n )\n df = pd.DataFrame(bars)\n df.to_csv(\"data.csv\")\n pass\n\n # If there's a file named trade_order.p in listdir(), then enter the loop below.\n if 'trade_order.p' in listdir():\n\n # Create a special instance of IB() JUST for entering orders.\n # The reason for this is because the way that Interactive Brokers automatically provides valid order IDs to\n # ib_insync is not trustworthy enough to really rely on. It's best practice to set aside a dedicated client ID\n # to ONLY be used for submitting orders, and close the connection when the order is successfully submitted.\n\n # your code goes here\n trd_ordr = pickle.load(open(\"trade_order.p\", \"rb\"))\n mrk_ordr = MarketOrder(action=trd_ordr['action'], totalQuantity=trd_ordr['trade_amt'], account=acc_number)\n cntrct = Forex(pair=trd_ordr['trade_currency'])\n ib_orders = IB()\n ib_orders.connect(host='127.0.0.1', port=port, clientId=orders_client_id)\n while not ib_orders.isConnected():\n sleep(.01)\n new_order = ib_orders.placeOrder(cntrct, mrk_ordr)\n # The new_order object returned by the call to ib_orders.placeOrder() that you've written is an object of class\n # `trade` that is kept continually updated by the `ib_insync` machinery. 
It's a market order; as such, it will\n # be filled immediately.\n # In this while loop, we wait for confirmation that new_order filled.\n while not new_order.orderStatus.status == 'Filled':\n ib_orders.sleep(0) # we use ib_orders.sleep(0) from the ib_insync module because the async socket connection\n # is not built for the normal time.sleep() function.\n\n # your code goes here\n remove('trade_order.p')\n ib_orders.disconnect()\n # pass: same reason as above.\n pass\n\n # sleep, for the while loop.\n ib.sleep(sampling_rate)\n\nib.disconnect()","repo_name":"farukuslu/trading_algorithm_interactive_brokers","sub_path":"ibkr_app.py","file_name":"ibkr_app.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41567276501","text":"#!/usr/bin/env python3\n\nimport datetime\nimport time\n\n\ndef format_timedelta(d):\n rem = d.total_seconds()\n\n if rem > 86400:\n out_format = \"{days:d}d{hours:02d}h{minutes:02d}m{seconds:02d}s\"\n elif rem > 3600:\n out_format = \"{hours:d}h{minutes:02d}m{seconds:02d}s\"\n elif rem > 60:\n out_format = \"{minutes:2d}m{seconds:02d}s\"\n else:\n out_format = \"{seconds:02d}.{ms:03d}s\"\n\n days = int(rem // 86400)\n rem -= days * 86400\n\n hours = int(rem // 3600)\n rem -= hours * 3600\n\n minutes = int(rem // 60)\n rem -= minutes * 60\n\n seconds = int(rem)\n rem -= seconds\n\n ms = int(rem * 1000)\n rem -= ms / 1000\n\n return out_format.format(\n days=days, hours=hours, minutes=minutes, seconds=seconds, ms=ms\n )\n\n\nclass Timer:\n def __init__(self, message=None):\n self.message = message\n\n self.start_datetime = None\n self.end_datetime = None\n self.start_time = None\n self.end_time = None\n\n def __enter__(self):\n self.start_datetime = datetime.datetime.now()\n self.start_time = time.perf_counter()\n\n if self.message is not None:\n print(\"[{}]\\t{}\\r\".format(self.start_datetime, self.message), end=\"\")\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.end_datetime = datetime.datetime.now()\n self.end_time = time.perf_counter()\n\n self.elapsed_time = datetime.timedelta(seconds=self.end_time - self.start_time)\n\n if self.message is not None:\n clear_len = len(self.message) + 20\n print(\n \"\\r{}\\r[{}]\\t{}\".format(\n \" \" * clear_len, format_timedelta(self.elapsed_time), self.message\n )\n )\n","repo_name":"csullivan/settings","sub_path":"pylib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35727668402","text":"from sklearn.model_selection import ParameterGrid\n\nfrom tagger.model_solver import Solver\n\n\ndef grid_search():\n \"\"\"Method helps to make a grid search\"\"\"\n grid = {\n 'E_DIM': [64],\n 'H_DIM': [64],\n 'lr': [0.1, 0.01],\n 'token_mode': ['form', 'lemma'],\n 'epochs': [50],\n }\n\n # datapath = 'Taiga'\n datapath = 'ENG_EWT'\n\n for conf, hypers in enumerate(sorted(ParameterGrid(grid), key=lambda x: list(x.values()))):\n solver = Solver(conf, hypers, datapath)\n solver.train()\n results = solver.evaluate()\n print('Evaluation results:')\n print(\"\\n\".join(results))\n\n\ndef train():\n hypers = {\n 'E_DIM': 64,\n 'H_DIM': 64,\n 'lr': 0.1,\n 'token_mode': 'lemma',\n 'epochs': 50,\n }\n\n datapath = 'Taiga'\n\n solver = Solver(4,hypers, datapath)\n solver.train()\n results = solver.evaluate()\n print('Evaluation results:')\n print(\"\\n\".join(results))\n\n\nif __name__ == '__main__':\n # 
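uncomment the next line to also run the hyperparameter grid search defined above before the single training run:\n    # 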
grid_search()\n train()","repo_name":"kamilkoduo/lstm-pos-tagger","sub_path":"tagger/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12525684116","text":"import tkinter as tk\nfrom InterpolationClass import InterpolationClass\n\nfrom InterfaceToSQL import MyConnector as Connector\n\nclass Window(tk.Tk):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n self.connector = Connector('127.0.0.1', '3306', 'interpolations_output', 'root', 'F0ll0wSQL')\n self.picture = InterpolationClass()\n self.output = None\n self.keys = None\n self.time = 0\n self.polygon_coords = {}\n self.polygon_coords['xy'] = []\n self.polygon_coords['xz'] = []\n self.polygon_coords['zy'] = []\n self.label_vx0 = tk.Label(self, text='Vx0')\n self.label_vy0 = tk.Label(self, text='Vy0')\n self.label_vz0 = tk.Label(self, text='Vz0')\n self.label_x0 = tk.Label(self, text='X0')\n self.label_y0 = tk.Label(self, text='Y0')\n self.label_z0 = tk.Label(self, text='Z0')\n self.label_t0 = tk.Label(self, text='t0')\n self.label_t = tk.Label(self, text='t')\n self.label_H1 = tk.Label(self, text='Секунды')\n self.label_H2 = tk.Label(self, text='Секунды') \n self.label_ax = tk.Label(self, text='Ax')\n self.label_ay = tk.Label(self, text='Ay')\n self.label_az = tk.Label(self, text='Az')\n self.label_vx = tk.Label(self, text='Vx')\n self.label_vy = tk.Label(self, text='Vy')\n self.label_vz = tk.Label(self, text='Vz')\n self.label_x = tk.Label(self, text='X')\n self.label_y = tk.Label(self, text='Y')\n self.label_z = tk.Label(self, text='Z')\n self.label_min = tk.Label(self, text='Min')\n self.label_max = tk.Label(self, text='Max')\n self.label_step_h = tk.Label(self, text='Шаг h')\n\n self.entry_vx0 = tk.Entry(self)\n self.entry_vy0 = tk.Entry(self)\n self.entry_vz0 = tk.Entry(self)\n self.entry_ax0 = tk.Entry(self)\n self.entry_ay0 = tk.Entry(self)\n self.entry_az0 = tk.Entry(self)\n self.entry_x0 = tk.Entry(self)\n self.entry_y0 = tk.Entry(self)\n self.entry_z0 = tk.Entry(self)\n self.entry_t0 = tk.Entry(self)\n self.entry_vx = tk.Entry(self)\n self.entry_vy = tk.Entry(self)\n self.entry_vz = tk.Entry(self)\n self.entry_ax = tk.Entry(self)\n self.entry_ay = tk.Entry(self)\n self.entry_az = tk.Entry(self)\n self.entry_x = tk.Entry(self)\n self.entry_y = tk.Entry(self)\n self.entry_z = tk.Entry(self)\n self.entry_t = tk.Entry(self)\n\n self.entry_vx0.insert(0, '0')\n self.entry_vy0.insert(0, '0.026')\n self.entry_vz0.insert(0, '0.01')\n self.entry_x0.insert(0, '0.5')\n self.entry_y0.insert(0, '0.5')\n self.entry_z0.insert(0, '0.2')\n self.entry_t0.insert(0, '0')\n self.entry_t.insert(0, '4000')\n self.entry_vx['state'] = tk.DISABLED\n self.entry_vy['state'] = tk.DISABLED\n self.entry_vz['state'] = tk.DISABLED\n self.entry_ax['state'] = tk.DISABLED\n self.entry_ay['state'] = tk.DISABLED\n self.entry_az['state'] = tk.DISABLED\n self.entry_x['state'] = tk.DISABLED\n self.entry_y['state'] = tk.DISABLED\n self.entry_z['state'] = tk.DISABLED\n \n self.computate = tk.Button(self, text='Рассчитать', command=self.computation)\n\n self.scale = tk.Scale(self, orient=tk.HORIZONTAL, length=400, from_=1, to=1, tickinterval=1,\n resolution=1, command=self.output_data)\n\n self.canvas_xy = tk.Canvas(self, width=0, height=0)\n self.canvas_zy = tk.Canvas(self, width=0, height=0)\n self.canvas_xz = tk.Canvas(self, width=0, height=0)\n \n self.label_vx0.grid(row=1, column=1)\n self.label_vy0.grid(row=1, 
column=2)\n        self.label_vz0.grid(row=1, column=3)\n        self.entry_vx0.grid(row=2, column=1)\n        self.entry_vy0.grid(row=2, column=2)\n        self.entry_vz0.grid(row=2, column=3)\n        self.label_x0.grid(row=3, column=1)\n        self.label_y0.grid(row=3, column=2)\n        self.label_z0.grid(row=3, column=3)\n        self.entry_x0.grid(row=4, column=1)\n        self.entry_y0.grid(row=4, column=2)\n        self.entry_z0.grid(row=4, column=3)\n        self.label_t0.grid(row=5, column=1, columnspan=3)\n        self.entry_t0.grid(row=6, column=1, columnspan=2)\n        self.label_t.grid(row=7, column=1, columnspan=3)\n        self.entry_t.grid(row=8, column=1, columnspan=2)\n        self.label_H1.grid(row=6, column=3)\n        self.label_H2.grid(row=8, column=3)\n        self.computate.grid(row=9, column=1, columnspan=3)\n        self.label_ax.grid(row=10, column=1)\n        self.label_ay.grid(row=10, column=2)\n        self.label_az.grid(row=10, column=3)\n        self.entry_ax.grid(row=11, column=1)\n        self.entry_ay.grid(row=11, column=2)\n        self.entry_az.grid(row=11, column=3)\n        self.label_vx.grid(row=12, column=1)\n        self.label_vy.grid(row=12, column=2)\n        self.label_vz.grid(row=12, column=3)\n        self.entry_vx.grid(row=13, column=1)\n        self.entry_vy.grid(row=13, column=2)\n        self.entry_vz.grid(row=13, column=3)\n        self.label_x.grid(row=14, column=1)\n        self.label_y.grid(row=14, column=2)\n        self.label_z.grid(row=14, column=3)\n        self.entry_x.grid(row=15, column=1)\n        self.entry_y.grid(row=15, column=2)\n        self.entry_z.grid(row=15, column=3)\n        self.label_step_h.grid(row=16, column=1, columnspan=3)\n        self.scale.grid(row=17, column=1, columnspan=3)\n        self.canvas_xy.grid(row=18, column=4)\n        self.canvas_zy.grid(row=18, column=1, columnspan=3)\n        self.canvas_xz.grid(row=1, column=4, rowspan=17)\n\n    def computation(self):\n        '''\n        This function is run when the button is pressed\n        '''\n        self.picture.Input(\n            float(self.entry_vx0.get()), \n            float(self.entry_vy0.get()),\n            float(self.entry_vz0.get()),\n            float(self.entry_x0.get()),\n            float(self.entry_y0.get()),\n            float(self.entry_z0.get()),\n            float(self.entry_t0.get()),\n            float(self.entry_t.get())\n        )\n        self.output, self.keys = self.picture.Calc()\n        \"\"\"\n        for i in range(0, len(self.keys), 60):\n            self.connector.insert(table='variables', \n                                Vx=str(self.output[self.keys[i]]['vx']),\n                                Vy=str(self.output[self.keys[i]]['vy']),\n                                Vz=str(self.output[self.keys[i]]['vz']),\n                                Ax=str(self.output[self.keys[i]]['ax']),\n                                Ay=str(self.output[self.keys[i]]['ay']),\n                                Az=str(self.output[self.keys[i]]['az']),\n                                X=str(self.output[self.keys[i]]['x']),\n                                Y=str(self.output[self.keys[i]]['y']),\n                                Z=str(self.output[self.keys[i]]['z']),\n                                Time=self.keys[i])\n\"\"\"\n        self.entry_vx['state'] = tk.NORMAL\n        self.entry_vy['state'] = tk.NORMAL\n        self.entry_vz['state'] = tk.NORMAL\n        self.entry_ax['state'] = tk.NORMAL\n        self.entry_ay['state'] = tk.NORMAL\n        self.entry_az['state'] = tk.NORMAL\n        self.entry_x['state'] = tk.NORMAL\n        self.entry_y['state'] = tk.NORMAL\n        self.entry_z['state'] = tk.NORMAL\n        \n        self.entry_vx.delete(0, 'end')\n        self.entry_vy.delete(0, 'end')\n        self.entry_vz.delete(0, 'end')\n        self.entry_ax.delete(0, 'end')\n        self.entry_ay.delete(0, 'end')\n        self.entry_az.delete(0, 'end')\n        self.entry_x.delete(0, 'end')\n        self.entry_y.delete(0, 'end')\n        self.entry_z.delete(0, 'end')\n        \n        self.entry_vx.insert(0, str(self.output[self.keys[0]]['vx']))\n        self.entry_vy.insert(0, str(self.output[self.keys[0]]['vy']))\n        self.entry_vz.insert(0, str(self.output[self.keys[0]]['vz']))\n        self.entry_ax.insert(0, str(self.output[self.keys[0]]['ax']))\n        self.entry_ay.insert(0, str(self.output[self.keys[0]]['ay']))\n        
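# note: these read-only Entry widgets were switched to NORMAL above only so their values can be rewritten here; they are set back to DISABLED right after\n        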
self.entry_az.insert(0, str(self.output[self.keys[0]]['az']))\n self.entry_x.insert(0, str(self.output[self.keys[0]]['x']))\n self.entry_y.insert(0, str(self.output[self.keys[0]]['y']))\n self.entry_z.insert(0, str(self.output[self.keys[0]]['z']))\n\n self.entry_vx['state'] = tk.DISABLED\n self.entry_vy['state'] = tk.DISABLED\n self.entry_vz['state'] = tk.DISABLED\n self.entry_ax['state'] = tk.DISABLED\n self.entry_ay['state'] = tk.DISABLED\n self.entry_az['state'] = tk.DISABLED\n self.entry_x['state'] = tk.DISABLED\n self.entry_y['state'] = tk.DISABLED\n self.entry_z['state'] = tk.DISABLED\n\n self.scale['from_'] = self.keys[0]\n self.scale['to'] = self.keys[len(self.keys)-1] - 60\n self.scale['tickinterval'] = self.keys[len(self.keys)-1] - self.keys[0]\n self.scale['resolution'] = 60\n \n self.init_reposition_variables()\n self.draw()\n self.update()\n\n def output_data(self, time):\n self.time = int(time)\n self.entry_vx['state'] = tk.NORMAL\n self.entry_vy['state'] = tk.NORMAL\n self.entry_vz['state'] = tk.NORMAL\n self.entry_ax['state'] = tk.NORMAL\n self.entry_ay['state'] = tk.NORMAL\n self.entry_az['state'] = tk.NORMAL\n self.entry_x['state'] = tk.NORMAL\n self.entry_y['state'] = tk.NORMAL\n self.entry_z['state'] = tk.NORMAL\n \n self.entry_vx.delete(0, 'end')\n self.entry_vy.delete(0, 'end')\n self.entry_vz.delete(0, 'end')\n self.entry_ax.delete(0, 'end')\n self.entry_ay.delete(0, 'end')\n self.entry_az.delete(0, 'end')\n self.entry_x.delete(0, 'end')\n self.entry_y.delete(0, 'end')\n self.entry_z.delete(0, 'end')\n \n self.entry_vx.insert(0, str(self.output[self.keys[self.time]]['vx']))\n self.entry_vy.insert(0, str(self.output[self.keys[self.time]]['vy']))\n self.entry_vz.insert(0, str(self.output[self.keys[self.time]]['vz']))\n self.entry_ax.insert(0, str(self.output[self.keys[self.time]]['ax']))\n self.entry_ay.insert(0, str(self.output[self.keys[self.time]]['ay']))\n self.entry_az.insert(0, str(self.output[self.keys[self.time]]['az']))\n self.entry_x.insert(0, str(self.output[self.keys[self.time]]['x']))\n self.entry_y.insert(0, str(self.output[self.keys[self.time]]['y']))\n self.entry_z.insert(0, str(self.output[self.keys[self.time]]['z']))\n\n self.entry_vx['state'] = tk.DISABLED\n self.entry_vy['state'] = tk.DISABLED\n self.entry_vz['state'] = tk.DISABLED\n self.entry_ax['state'] = tk.DISABLED\n self.entry_ay['state'] = tk.DISABLED\n self.entry_az['state'] = tk.DISABLED\n self.entry_x['state'] = tk.DISABLED\n self.entry_y['state'] = tk.DISABLED\n self.entry_z['state'] = tk.DISABLED\n\n self.draw()\n\n def init_reposition_variables(self):\n self.maximum_x = self.output[self.keys[0]]['x']\n self.maximum_y = self.output[self.keys[0]]['y']\n self.maximum_z = self.output[self.keys[0]]['z']\n self.minimum_x = self.output[self.keys[0]]['x']\n self.minimum_y = self.output[self.keys[0]]['y']\n self.minimum_z = self.output[self.keys[0]]['z']\n for i in self.output:\n if self.output[i]['x'] > self.maximum_x:\n self.maximum_x = self.output[i]['x']\n if self.output[i]['x'] < self.minimum_x:\n self.minimum_x = self.output[i]['x']\n if self.output[i]['y'] > self.maximum_y:\n self.maximum_y = self.output[i]['y']\n if self.output[i]['y'] < self.minimum_y:\n self.minimum_y = self.output[i]['y']\n if self.output[i]['z'] > self.maximum_z:\n self.maximum_z = self.output[i]['z']\n if self.output[i]['z'] < self.minimum_z:\n self.minimum_z = self.output[i]['z']\n\n self.polygon_coords['xy'].clear()\n self.polygon_coords['xz'].clear()\n self.polygon_coords['zy'].clear() \n for i in self.keys:\n x, 
y, z = self.output[i]['x'], self.output[i]['y'], self.output[i]['z']\n self.polygon_coords['xy'].append((x - self.minimum_x) * (390 - 10) / (self.maximum_x - self.minimum_x) + 10)\n self.polygon_coords['xy'].append((y - self.minimum_y) * (390 - 10) / (self.maximum_y - self.minimum_y) + 10)\n self.polygon_coords['xz'].append((x - self.minimum_x) * (390 - 10) / (self.maximum_x - self.minimum_x) + 10)\n self.polygon_coords['xz'].append((z - self.minimum_z) * (390 - 10) / (self.maximum_z - self.minimum_z) + 10)\n self.polygon_coords['zy'].append((z - self.minimum_z) * (390 - 10) / (self.maximum_z - self.minimum_z) + 10)\n self.polygon_coords['zy'].append((y - self.minimum_y) * (390 - 10) / (self.maximum_y - self.minimum_y) + 10)\n\n def draw(self):\n self.canvas_xy.delete('ALL')\n self.canvas_zy.delete('ALL')\n self.canvas_xz.delete('ALL')\n self.canvas_xy.create_rectangle(-1, -1, 2000, 2000, fill='white')\n self.canvas_zy.create_rectangle(-1, -1, 2000, 2000, fill='white')\n self.canvas_xz.create_rectangle(-1, -1, 2000, 2000, fill='white')\n \n self.canvas_xz['height'] = self.canvas_zy['width'] = 400\n self.canvas_xy['width'] = self.canvas_xz['width'] = 400\n self.canvas_xy['height'] = self.canvas_zy['height'] = 400\n \n x, y, z = self.output[self.keys[self.time]]['x'], self.output[self.keys[self.time]]['y'], self.output[self.keys[self.time]]['z']\n\n x = (x - self.minimum_x) * (390 - 10) / (self.maximum_x - self.minimum_x) + 10 \n y = (y - self.minimum_y) * (390 - 10) / (self.maximum_y - self.minimum_y) + 10 \n z = (z - self.minimum_z) * (390 - 10) / (self.maximum_z - self.minimum_z) + 10\n\n self.canvas_xy.create_line(*self.polygon_coords['xy'], fill=None)\n self.canvas_zy.create_line(*self.polygon_coords['zy'], fill=None)\n self.canvas_xz.create_line(*self.polygon_coords['xz'], fill=None)\n\n self.canvas_xy.create_oval(x-5, y-5, x+5, y+5)\n self.canvas_zy.create_oval(z-5, y-5, z+5, y+5)\n self.canvas_xz.create_oval(x-5, z-5, x+5, z+5)\n\n self.canvas_xy.update()\n self.canvas_zy.update()\n self.canvas_xz.update()","repo_name":"Ariken12/InterpolationGUI_firstTry","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":14550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10323356993","text":"from modules.base_module import Module\nfrom modules.location import refresh_avatar\nimport asyncio\nimport random\nimport time\n\nclass_name = \"NewYear2021\"\n\n\nclass NewYear2021(Module):\n prefix = \"ny21\"\n\n def __init__(self, server):\n self.server = server\n self.commands = {\"ts\": self.throw, \"cf\": self.fireball, \"ci\": self.iceball, \"bi\": self.buy_item}\n self.config = server.parser.parse_ny21()\n \n async def get_info(self, uid, client=None):\n info = {\"stcp\": True, \"mtct\": 999999, \"lmpt\": 1606654102, \"sbct\": 999999, \"sfcnt\": 999999, \"lps\": 999999, \"ishc\": 999999, \"spct\": 999999,\n \"sftlct\": 999999, \"sfhmct\": 999999, \"sfvnct\": 999999, \"shldct\": 999999, \"cstlv\": 999999, \"cstct\": 999999, \"brnchct\": 999999, \"lbftm\": 1606654102}\n if client:\n await client.send([\"ny21.ui\", {\"if\": info}])\n await refresh_avatar(client, self.server)\n return info\n \n async def iceball(self, msg, client):\n online = self.server.online\n loop = asyncio.get_event_loop()\n room = self.server.rooms[client.room].copy()\n anim = random.choice([\"Protect\", \"Freezing\"])\n for uid in room:\n try:\n tmp = online[uid]\n except KeyError:\n continue\n loop.create_task(tmp.send([\"o.r.ny21.ci\", 
{\"ui\": client.uid, \"to\": msg[2][\"to\"], \"ai\": anim}])) \n\t\t\n async def fireball(self, msg, client):\n online = self.server.online\n loop = asyncio.get_event_loop()\n room = self.server.rooms[client.room].copy()\n for uid in room:\n try:\n tmp = online[uid]\n except KeyError:\n continue\n loop.create_task(tmp.send([\"o.r.ny21.cf\", {\"ui\": client.uid, \"to\": msg[2][\"to\"]}])) \n\t\t\n async def throw(self, msg, client):\n online = self.server.online\n loop = asyncio.get_event_loop()\n room = self.server.rooms[client.room].copy()\n for uid in room:\n try:\n tmp = online[uid]\n except KeyError:\n continue\n loop.create_task(tmp.send([\"ny21.ts\", {\"ui\": client.uid, \"ti\": msg[2][\"ti\"]}]))\n \n async def buy_item(self, msg, client):\n item = msg[2][\"tpid\"]\n if not item or item not in self.config[\"items\"] or not self.config[\"items\"][item]:\n return\n count = int(msg[2][\"cnt\"])\n type_ = self.config[\"items\"][item][\"type\"]\n price = self.config[\"items\"][item][\"price\"] * count\n info = await self.get_info(client.uid, client=client)\n if info[\"sfcnt\"] < price:\n return\n #await self.server.redis.decrby(f\"uid:{client.uid}:ny21:snowflake\", price)\n if type_ == \"clothes\":\n await self.server.inv[client.uid].add_item(item, \"cls\")\n await self.server.inv[client.uid].change_wearing(item, True)\n elif type_ == \"furniture\":\n await self.server.inv[client.uid].add_item(item, \"frn\", count)\n elif type_ == \"clothesSet\":\n if await self.server.redis.incrby(f\"uid:{client.uid}:appearance:gender\", 0) == 1:\n gender = \"boy\"\n else:\n gender = \"girl\"\n ctp = await self.server.redis.get(f\"uid:{client.uid}:wearing\")\n for cloth in await self.server.redis.smembers(f\"uid:{client.uid}:{ctp}\"):\n await self.server.inv[client.uid].change_wearing(cloth, False)\n for cloth in self.server.modules[\"a\"].sets[gender][item]:\n if await self.server.redis.sismember(f\"uid:{client.uid}:items\", cloth):\n continue\n await self.server.inv[client.uid].add_item(cloth, \"cls\")\n await self.server.inv[client.uid].change_wearing(cloth, True)\n await self.server.modules[\"a\"].update_crt(client.uid)\n clothes = await self.server.get_clothes(client.uid, type_=2)\n inv = self.server.inv[client.uid].get()\n resources = await self.server.get_resources(client.uid)\n cloth_rating = await self.server.redis.incrby(f\"uid:{client.uid}:crt\", 0)\n await self.get_info(client.uid, client=client)\n await client.send([\"ny21.bi\", {\"tpid\": item, \"cnt\": count, \"clths\": clothes, \"inv\": inv, \"res\": resources, \"crt\": cloth_rating}])","repo_name":"coolgromov/AvaBox","sub_path":"modules/newyear2021.py","file_name":"newyear2021.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42734916649","text":"#!/usr/bin/env python\nimport boto3\nimport json\n\nTABLE_NAME='lastKnown'\n# INDEX_NAME='scan_index'\n\nclient = boto3.client('dynamodb')\n\nresponse = client.scan(\n TableName = TABLE_NAME,\n \n)\n\nprint(json.dumps(response, indent=4))\n","repo_name":"alejandromb/utils_and_courses","sub_path":"dynamodb/aws-dynamodb-fundamentals/06/demos/ScanTable.py","file_name":"ScanTable.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4969936883","text":"#coding:utf-8\r\n'''\r\n测试说明:\r\n此测试是针对redis_wrap库进行自动json编码的测试\r\n\r\n测试结果:\r\nRan 5 tests in 0.036s\r\n\r\nFAILED 
(failures=1)\r\n\r\n失败原因是在对list进行remove(value)操作的时候,redis的lrem无法删除序列化后的对象,\r\nset类型能正常remove序列化后的对象.\r\n\r\n@author: Zay\r\n'''\r\nimport unittest\r\nfrom ztq_core import get_redis, get_list, get_hash, get_set, get_dict, setup_redis, \\\r\nget_key, set_key, get_queue\r\n\r\nclass TestRediswrap(unittest.TestCase):\r\n def setUp(self):\r\n \"\"\"初始化连接redis,和初始化变量\r\n \"\"\"\r\n setup_redis('default', '192.168.209.128', 6379, socket_timeout=2)\r\n get_redis(system='default').delete('list')\r\n get_redis(system='default').delete('set')\r\n get_redis(system='default').delete('hash')\r\n get_redis(system='default').delete('dict')\r\n get_redis(system='default').delete('kv')\r\n get_redis(system='default').delete('queue')\r\n self.message = {\"hello\":\"grizzly\"}\r\n \r\n def test_getset(self):\r\n \"\"\"进行基本的redis 的key进行get和set的操作.\r\n \"\"\"\r\n Test_key = get_key('kv',serialized_type='json')\r\n self.assertEqual(Test_key,None)\r\n \r\n set_key('kv',self.message)\r\n \r\n Test_key = get_key('kv',serialized_type='json')\r\n self.assertEqual(Test_key,self.message)\r\n \r\n def test_dict(self):\r\n \"\"\"测试redis_wrap的dict类型的操作\r\n \"\"\"\r\n Test_dict = get_dict('dict',serialized_type='json')\r\n \r\n Test_dict['id'] = self.message\r\n self.assertEqual(self.message, Test_dict['id'])\r\n \r\n for k,v in Test_dict.items():\r\n self.assertEqual(k, 'id')\r\n self.assertEqual(v, self.message)\r\n \r\n del Test_dict['id']\r\n self.assertNotEqual(self.message,Test_dict.get('id'))\r\n \r\n def test_hash(self):\r\n \"\"\"测试redis_wrap的 hash类型的操作\r\n \"\"\"\r\n Test_dict = get_hash('hash',serialized_type='json')\r\n \r\n Test_dict['id'] = self.message\r\n self.assertEqual(self.message, Test_dict['id'])\r\n \r\n del Test_dict['id']\r\n self.assertNotEqual(self.message,Test_dict.get('id')) \r\n \r\n def test_list(self):\r\n \"\"\"进行redis_wrap的list的基本操作\r\n \"\"\"\r\n Test_list = get_list('list',serialized_type='json')\r\n \r\n Test_list.append(self.message)\r\n self.assertEqual( len(Test_list),1)\r\n \r\n for item in Test_list:\r\n self.assertEqual(self.message, item)\r\n \r\n #这一步失败原因是redis的lrem方法有无法删除序列化后的数据\r\n Test_list.remove(self.message)\r\n self.assertEqual( len(Test_list),0)\r\n\r\n def test_set(self):\r\n \"\"\"进行对redis_wrap的set类型的基本操作\r\n \"\"\"\r\n Test_set = get_set('set',serialized_type='json')\r\n Test_set.add(self.message)\r\n \r\n for item in Test_set:\r\n self.assertEqual( item,self.message)\r\n \r\n Test_set.remove(self.message)\r\n self.assertEqual( len(Test_set),0)\r\n\r\n def test_queue(self):\r\n \"\"\"进行redis_wrap的queue的基本操作\r\n \"\"\"\r\n Test_queue = get_queue('queue',serialized_type='json')\r\n \r\n Test_queue.push(self.message)\r\n self.assertEqual( len(Test_queue),1)\r\n \r\n for item in Test_queue:\r\n self.assertEqual(self.message, item)\r\n \r\n #这一步失败原因是redis的lrem方法有无法删除数据\r\n Test_queue.remove(self.message)\r\n self.assertEqual( len(Test_queue),0)\r\n #===========================================================================\r\n # \r\n # message = Test_queue.pop(timeout= 1)\r\n # self.assertEqual(self.message, message)\r\n # self.assertEqual(len(Test_queue),0)\r\n #===========================================================================\r\n \r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"everydo/ztq","sub_path":"ztq_core/test/test_redis_wrap.py","file_name":"test_redis_wrap.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"zh","doc_type":"code","stars":142,"dataset":"github-code","pt":"21"} 
+{"seq_id":"20992811422","text":"from pymongo import MongoClient, ObjectId, IndexModel, ASCENDING, DESCENDING\nfrom copy import deepcopy\n\nfrom base import Store, PRE_AUTH, PERMISSION_REQUIRED_KEY\n\n\nclass MongoStore(Store):\n \"\"\"MongoStore\"\"\"\n\n @staticmethod\n def genId(inputId):\n \"\"\"genId\n\n :param inputId:\n \"\"\"\n return ObjectId(inputId) if (isinstance(inputId, str) and\n ObjectId.is_valid(inputId)) else inputId\n\n @staticmethod\n def transformOutput(rec):\n \"\"\"transformOutput\n\n :param rec:\n \"\"\"\n if rec and '_id' in rec and isinstance(rec['_id'], str):\n rec['_id'] = str(rec['_id'])\n return rec\n\n @staticmethod\n def addRBACListFilter(users, filters):\n \"\"\"addRBACListFilter\n\n :param users:\n :param filters:\n \"\"\"\n mainFilter = [{\n PERMISSION_REQUIRED_KEY: {'$exists': False}\n }, {\n PERMISSION_REQUIRED_KEY: {'$in': user.roles}\n }]\n if '$or' in filters:\n if '$and' in filters:\n filters['$and'] = filters['$and'] + mainFilter\n else:\n filters['$and'] = [{'$or': mainFilter}, filters]\n else:\n filters['$or'] = mainFilter\n return filters\n\n def getDbCollection(self, name):\n \"\"\"getDbCollection\n\n :param name:\n \"\"\"\n return self.db.collection(self.getDbCollectionName(name))\n\n def count(self, user, coll, filters, options):\n \"\"\"count\n\n :param user:\n :param coll:\n :param filters:\n :param options:\n \"\"\"\n collection = self.preAuthorizedColl(user, coll, filters, options)\n if collection:\n return collection.count(filters)\n raise CollectionNotFoundError\n\n def list(self, user, coll, filters, options={}):\n \"\"\"list\n\n :param user:\n :param coll:\n :param filters:\n :param options:\n \"\"\"\n result = Result()\n result.records = []\n result.total = 0\n listOptions = self.preAuthorizeCollQuery(user, coll, options)\n listFilters = MongoStore.addRBACListFilter(\n filters if isinstance(filters, dict) else {})\n skip = int(listOptions.get('skip', 0))\n limit = int(listOptions.get('limit', 10))\n count = listOptions.get('count', True)\n projection = listOptions.get('projection', None)\n collection = self.getDbCollection(coll)\n if collection:\n cursor = collection.find(listFilters, projection=projection)\n if 'sort' in listOptions:\n cursor = cursor.sort(sort)\n result.records = map(MongoStore.transformOutput,\n list(cursor.skip(skip).limit(limit)))\n if count:\n result.total = self.count(self, user, listFilters, listOptions)\n return result\n\n def listcolls(self):\n \"\"\"listcolls\"\"\"\n return map(\n self.transformCollectionName, self.db.list_collection_names())\n\n def read(self, user, coll, _id, options):\n \"\"\"read\n\n :param user:\n :param coll:\n :param _id:\n :param options:\n \"\"\"\n cont = self.getDbCollection(coll).find_one({\n '_id': MongoStore.genId(_id)\n }, projection=options.get('projection'))\n self.authorize(user, coll, 0, cont, options)\n self.emitDbEvent('read:'+coll, str(_id), cont)\n return cont\n\n def write(self, user, coll, _id, data, options={}):\n \"\"\"write\n\n :param user:\n :param coll:\n :param _id:\n :param data:\n :param options:\n \"\"\"\n collection = self.getDbCollection(coll)\n if _id:\n prevInfo = self.getPrevDoc(user, coll, _id, options)\n authUpdate = self.authorize(user, coll, 1, prevInfo, options)\n self.validateSchema(self, coll, prevInfo, data, options)\n rawQuery = options.get('rawDbQuery', False)\n if rawQuery:\n if '$set' in data:\n data['$set'].update(authUpdate)\n else:\n data['$set'] = authUpdate\n collection.find_one_and_update({_id: Store.GenId(_id)}, data)\n else:\n 
data.update(authUpdate)\n collection.find_one_and_update({_id: Store.GenId(_id)},\n {'$set': data})\n self.emitDbEvent('update:'+coll, str(_id), cont)\n return 1\n self.validateSchema(self, coll, data, {}, options)\n data.update(self.authorizeCollection(self, user, coll, 1, options))\n newId = collection.insert_one(data).insertedId\n self.emitDbEvent('create:'+coll, str(newId), data)\n return newId\n\n def delete(self, user, coll, _id, options):\n \"\"\"delete\n\n :param user:\n :param coll:\n :param _id:\n :param options:\n \"\"\"\n prevInfo = self.getPrevDoc(user, coll, _id, options)\n authUpdate = self.authorize(user, coll, 1, prevInfo, options)\n self.getDbCollection(coll).delete_one({_id: Store.GenId(_id)})\n self.emitDbEvent('delete:'+coll, str(_id), prevInfo)\n return 1\n\n def setupIndexes(self, coll, indexes):\n \"\"\"setupIndexes\n\n :param coll:\n :param indexes:\n \"\"\"\n allIndexes = []\n if isinstance(indexes, list):\n collection = self.getDbCollection(coll)\n for ind in indexes:\n allKeys = ind.get('keyTuples', None)\n if not allKeys:\n continue\n indexArgs = ind.get('indexArgs', {})\n allIndexes.append(IndexModel(allKeys, **indexArgs))\n return allIndexes\n\n def createCollection(self, coll):\n \"\"\"createCollection\n\n :param coll:\n \"\"\"\n self.db.create_collection(self.getDbCollectionName(coll))\n\n def rmcoll(self, coll, user):\n \"\"\"rmcoll\n\n :param coll:\n :param user:\n \"\"\"\n self.authorizeCollection(coll, 1, user)\n self.getDbCollection(coll).drop()\n return 1\n","repo_name":"codeofnode/py-dbao","sub_path":"src/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40329128686","text":"def SemesterRetrieval(): #Takes the studentID and the specified semester name as input and displays the grade for all the courses taken in the semester\n ID = str(input(\"Please input Student ID: \"))\n semester = str(input(\"Please input the semester: \"))\n studentIDList = readFile('StudentInfo2.txt','r',8)\n\n courseHeaderList = studentIDList[0][2:] #List which is later used to create the header of the table.\n\n headerCounter = 2 #initialization of varibles used for counters for print statements used in loops \n header =\"\"\n printCounter = 2\n studentGrades = \"\"\n\n while(headerCounter<8): #Displays the header \n header = header + \"\\t\" + studentIDList[0][headerCounter]\n headerCounter = headerCounter + 1\n print (header)\n\n found = False \n counterID = 0 #this varible keeps track of the row position. \n for i in range(len(studentIDList)): #Loop determines whether or not the ID exists\n if(ID == str(studentIDList[i][0])):\n found = True\n counterID = i-1\n if found == False:\n print(\"There is no student associated with this ID. \")\n \n else:\n if semester == studentIDList[counterID][2]: #checks for the indicated semester and prints the associated scores. \n while(printCounter<8):\n studentGrades = studentGrades + \"\\t\" + studentIDList[counterID][printCounter]\n printCounter = printCounter + 1\n print(studentGrades)\n elif semester == studentIDList[counterID+1][2]:\n while(printCounter<8):\n studentGrades = studentGrades + \"\\t\" + studentIDList[counterID+1][printCounter]\n printCounter = printCounter + 1\n print(studentGrades)\n else:\n print(\"This student exists but the indicated semester does not. 
\")\n\n","repo_name":"mjbock17/studentDatabase","sub_path":"Exercise4.py","file_name":"Exercise4.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33913841749","text":"from django.urls import path\n\nfrom . import views\nimport re\n\napp_name = \"encyclopedia\"\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"search/\", views.title, name=\"title\"),\n path(\"search/\", views.reqTitle, name=\"reqTitle\"),\n path(\"createPage\", views.createPage, name=\"createPage\"),\n path(\"editPage\", views.editPage, name=\"editPage\"),\n path(\"randomize\", views.randomize, name=\"randomize\")\n]\n","repo_name":"Khongchai/CS50-django-wiki","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73029294773","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import partial\nimport logging\nfrom logging import CRITICAL, DEBUG, ERROR, Filter, Formatter, INFO, StreamHandler, WARN, getLogger\nimport re\nimport sys\n\nfrom .. import CondaError\nfrom .._vendor.auxlib.decorators import memoize\nfrom .._vendor.auxlib.logz import NullHandler\nfrom ..common.io import attach_stderr_handler\n\nTRACE = 5 # TRACE LOG LEVEL\nVERBOSITY_LEVELS = (WARN, INFO, DEBUG, TRACE)\n\n\nclass TokenURLFilter(Filter):\n TOKEN_URL_PATTERN = re.compile(\n r'(|https?://)' # \\1 scheme\n r'(|\\s' # \\2 space, or\n r'|(?:(?:\\d{1,3}\\.){3}\\d{1,3})' # ipv4, or\n r'|(?:' # domain name\n r'(?:[a-zA-Z0-9-]{1,20}\\.){0,10}' # non-tld\n r'(?:[a-zA-Z]{2}[a-zA-Z0-9-]{0,18})' # tld\n r'))' # end domain name\n r'(|:\\d{1,5})?' 
# \\3 port\n r'/t/[a-z0-9A-Z-]+/' # token\n )\n TOKEN_REPLACE = partial(TOKEN_URL_PATTERN.sub, r'\\1\\2\\3/t//')\n\n def filter(self, record):\n record.msg = self.TOKEN_REPLACE(record.msg)\n return True\n\n\n@memoize\ndef initialize_logging():\n initialize_root_logger()\n initialize_conda_logger()\n\n formatter = Formatter(\"%(message)s\\n\")\n\n stdout = getLogger('stdout')\n stdout.setLevel(INFO)\n stdouthandler = StreamHandler(sys.stdout)\n stdouthandler.setLevel(INFO)\n stdouthandler.setFormatter(formatter)\n stdout.addHandler(stdouthandler)\n stdout.addFilter(TokenURLFilter())\n stdout.propagate = False\n\n stderr = getLogger('stderr')\n stderr.setLevel(INFO)\n stderrhandler = StreamHandler(sys.stderr)\n stderrhandler.setLevel(INFO)\n stderrhandler.setFormatter(formatter)\n stderr.addHandler(stderrhandler)\n stderr.addFilter(TokenURLFilter())\n stderr.propagate = False\n\n binstar = getLogger('binstar')\n binstar.setLevel(CRITICAL+1)\n binstar.addHandler(NullHandler())\n binstar.propagate = False\n binstar.disabled = True\n\n\ndef initialize_root_logger(level=ERROR):\n attach_stderr_handler(level)\n\n\ndef initialize_conda_logger(level=WARN):\n attach_stderr_handler(level, 'conda')\n\n\ndef set_all_logger_level(level=DEBUG):\n formatter = Formatter(\"%(message)s\\n\") if level >= INFO else None\n attach_stderr_handler(level, formatter=formatter)\n attach_stderr_handler(level, 'conda', formatter=formatter)\n attach_stderr_handler(level, 'binstar', formatter=formatter)\n attach_stderr_handler(level, 'requests')\n attach_stderr_handler(level, 'requests.packages.urllib3')\n\n\ndef set_verbosity(verbosity_level):\n try:\n set_all_logger_level(VERBOSITY_LEVELS[verbosity_level])\n except IndexError:\n raise CondaError(\"Invalid verbosity level: %(verbosity_level)s\",\n verbosity_level=verbosity_level)\n\n\ndef trace(self, message, *args, **kwargs):\n if self.isEnabledFor(TRACE):\n self._log(TRACE, message, args, **kwargs)\n\n\nlogging.addLevelName(TRACE, \"TRACE\")\nlogging.Logger.trace = trace\ninitialize_logging()\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/conda_conda/conda-master/conda/gateways/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"16111295495","text":"features = ['funded_amnt','emp_length','annual_inc','home_ownership','grade',\n \"last_pymnt_amnt\", \"mort_acc\", \"pub_rec\", \"int_rate\", \"open_acc\",\"num_actv_rev_tl\",\n \"mo_sin_rcnt_rev_tl_op\",\"mo_sin_old_rev_tl_op\",\"bc_util\",\"bc_open_to_buy\",\n \"avg_cur_bal\",\"acc_open_past_24mths\",'loan_status'] #'sub_grade' #selecting final features #'addr_state''tax_liens',\nFinal_data = dataset[features] #19 features with target var\nFinal_data[\"int_rate\"] = Final_data[\"int_rate\"].apply(lambda x:float(x[:-1]) ) #reomving % sign, conv to float - int_rate column\nFinal_data= Final_data.reset_index(drop=True)\nprint(\"Current shape of dataset :\",Final_data.shape)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"msaluck/ml-huawei-lab","sub_path":"machinelearning-lab/Mechine Learning/Private Credit Default Prediction/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17473473832","text":"'''\nIn a fair coin we have an equal chance (50%) of either getting a ‘head’ or ‘tail’. 
That is, if we toss the coin a large number of times\nwe would observe heads approximately 50% of the time. Write a program to implement a biased coin toss where the chance of getting a head\nis 70% (and tail 30%). That is, if we invoke the program 1000 times we should see heads approximately 700 times.\n'''\n\n#PF-Tryout\n#Start writing your code here\nimport random\n\ndef biased_flip(prob_true):\n    return random.random() < prob_true\n        if cache[amount] > amount: return -1\n        else: return cache[amount]\n        ","repo_name":"RodellRodriguez/coding-problems","sub_path":"leetcode/coin_change.py","file_name":"coin_change.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1325643069","text":"import warnings\nfrom enum import Enum\n\nfrom torch import optim\nimport torch_optimizer\n\n\nclass OptimizersTypes(str, Enum):\n    sgd = \"sgd\"\n    yogi = \"yogi\"\n    adam = \"adam\"\n    radam = \"radam\"\n    diffgrad = \"diffgrad\"\n    novograd = \"novograd\"\n    adabound = \"adabound\"\n\n\noptimizers = {\n    OptimizersTypes.sgd: optim.SGD,\n    OptimizersTypes.yogi: torch_optimizer.Yogi,\n    OptimizersTypes.adam: optim.Adam,\n    OptimizersTypes.radam: torch_optimizer.RAdam,\n    OptimizersTypes.diffgrad: torch_optimizer.DiffGrad,\n    OptimizersTypes.novograd: torch_optimizer.NovoGrad,\n    OptimizersTypes.adabound: torch_optimizer.AdaBound\n}\n\noptimizers_options = {\n    OptimizersTypes.sgd: [\"momentum\", \"dampening\", \"nesterov\"],\n    OptimizersTypes.yogi: [\"betas\", \"eps\", \"initial_accumulator\"],\n    OptimizersTypes.adam: [\"betas\", \"eps\", \"amsgrad\"],\n    OptimizersTypes.radam: [\"betas\", \"eps\"],\n    OptimizersTypes.diffgrad: [\"betas\", \"eps\"],\n    OptimizersTypes.novograd: [\"betas\", \"eps\", \"grad_averaging\", \"amsgrad\"],\n    OptimizersTypes.adabound: [\"betas\", \"eps\", \"final_lr\", \"gamma\", \"amsbound\"]\n}\n\n\ndef build_optimizer(parameters, hparams):\n    optimizer_type = OptimizersTypes[hparams.optimizer]\n    optimizer_opts = {} if hparams.optim_options is None else hparams.optim_options\n\n    if optimizer_type in OptimizersTypes:\n        if not all(arg in optimizers_options[optimizer_type] for arg in optimizer_opts):\n            raise ValueError(\"You tried to pass options incompatible with {} optimizer. \"\n                             \"Check your parameters according to the description of the optimizer:\\n\\n{}\".\n                             format(optimizer_type, optimizers[optimizer_type].__doc__))\n\n        optimizer = optimizers[optimizer_type](\n            parameters,\n            lr=hparams.learning_rate,\n            weight_decay=hparams.weight_decay,\n            **optimizer_opts\n        )\n    else:\n        raise ValueError(f\"`{optimizer_type}` is not a valid optimizer type\")\n\n    if hparams.with_lookahead:\n        optimizer = torch_optimizer.Lookahead(optimizer, k=5, alpha=0.5)\n\n    return optimizer\n\n\nclass FakeScheduler(optim.lr_scheduler._LRScheduler):\n    def get_lr(self):\n        if not self._get_lr_called_within_step:\n            # use the imported warnings module rather than print, so the category is honored\n            warnings.warn(\"To get the last learning rate computed by the scheduler, please use `get_last_lr()`.\", DeprecationWarning)\n\n        return [group['lr'] for group in self.optimizer.param_groups]\n\n\nclass SchedulerTypes(str, Enum):\n    none = \"none\"\n    multi_step = \"multi_step\"\n    exponential = \"exp\"\n    plateau = \"plateau\"\n    cyclic = \"cyclic\"\n\n\nclass ReduceLROnPlateau(optim.lr_scheduler.ReduceLROnPlateau):\n    def __init__(self, optimizer, mode='min', factor=0.1, patience=10,\n                 verbose=False, threshold=1e-4, threshold_mode='rel',\n                 cooldown=0, min_lr=0, eps=1e-8):\n        super(ReduceLROnPlateau, self).__init__(optimizer, mode, factor, patience,\n                                                verbose, threshold, threshold_mode, cooldown, min_lr, eps)\n\n        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]\n\n\n    def get_last_lr(self):\n        return self._last_lr\n\n\nschedulers = {\n    SchedulerTypes.none: FakeScheduler,\n    SchedulerTypes.multi_step: optim.lr_scheduler.MultiStepLR,\n    SchedulerTypes.exponential: optim.lr_scheduler.ExponentialLR,\n    SchedulerTypes.plateau: ReduceLROnPlateau,\n    SchedulerTypes.cyclic: optim.lr_scheduler.CyclicLR\n}\n\nschedulers_options = {\n    SchedulerTypes.none: [],\n    SchedulerTypes.multi_step: [\"milestones\", \"gamma\", \"last_epoch\"],\n    SchedulerTypes.exponential: [\"gamma\", \"last_epoch\"],\n    SchedulerTypes.plateau: [\"mode\", \"factor\", \"patience\", \"threshold\", \"threshold_mode\", \"cooldown\", \"min_lr\", \"eps\"],\n    SchedulerTypes.cyclic: [\"base_lr\", \"max_lr\", \"step_size_up\", \"step_size_down\", \"mode\", \"gamma\", \"scale_fn\",\n                            \"scale_mode\", \"cycle_momentum\", \"base_momentum\", \"max_momentum\", \"last_epoch\"]\n}\n\n\ndef build_scheduler(optimizer, hparams):\n    scheduler_type = SchedulerTypes[hparams.lr_scheduler]\n    scheduler_opts = {} if hparams.lr_scheduler_options is None else hparams.lr_scheduler_options\n\n    if scheduler_type in SchedulerTypes:\n        if not all(arg in schedulers_options[scheduler_type] for arg in scheduler_opts):\n            raise ValueError(\"You tried to pass options incompatible with {} lr scheduler. 
\"\n \"Check your parameters according to the description of the scheduler:\\n\\n{}\".\n format(scheduler_type, schedulers[scheduler_type].__doc__))\n\n scheduler = schedulers[scheduler_type](\n optimizer,\n **scheduler_opts\n )\n else:\n raise ValueError(f\"`{scheduler_type}` is not a valid optimizer type\")\n\n return scheduler","repo_name":"wladradchenko/wunjo.wladradchenko.ru","sub_path":"portable/src/tacotron2/modules/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"21"} +{"seq_id":"16371558199","text":"import torch\nimport torch.nn as nn\n\n\ndef double_conv(in_c,out_c):\n conv = nn.Sequential(\n nn.Conv2d(in_c,out_c,kernel_size=3),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_c,out_c,kernel_size=3),\n nn.ReLU(inplace=True),\n )\n return conv\n\ndef crop_img(tensor,target_tensor):\n target_size = target_tensor.size()[2] # bs,c,h,w Ex:[[1,1024,28,28]] --> target_size = [56]\n tensor_size = tensor.size()[2] # Similarly it returns tensor_size = [28]\n delta = tensor_size - target_size # Calculate the difference\n delta = delta // 2 # Divide the difference by 2\n return tensor[:,:,delta:tensor_size-delta,delta:tensor_size-delta] # reshape the tensor \n\nclass UNet(nn.Module):\n def __init__(self):\n super(UNet, self).__init__()\n\n self.max_pool_2x2 = nn.MaxPool2d(kernel_size=2,stride=2)\n self.down_conv_1 = double_conv(1,64)\n self.down_conv_2 = double_conv(64,128)\n self.down_conv_3 = double_conv(128,256)\n self.down_conv_4 = double_conv(256,512)\n self.down_conv_5 = double_conv(512,1024)\n\n# One \n self.up_trans_1 = nn.ConvTranspose2d(\n in_channels=1024,\n out_channels=512, # Only half of the 1024 channel comes from here.\n kernel_size=2,\n stride=2,\n )\n\n self.up_conv_1 = double_conv(1024,512) # because the previous output channels are combined with the cropped image\n\n# Two\n self.up_trans_2 = nn.ConvTranspose2d(\n in_channels=512,\n out_channels=256, # Only half of the 1024 channel comes from here.\n kernel_size=2,\n stride=2,\n )\n\n self.up_conv_2 = double_conv(512,256) # because the previous output channels are combined with the cropped image\n\n# Three\n self.up_trans_3 = nn.ConvTranspose2d(\n in_channels=256,\n out_channels=128, # Only half of the 1024 channel comes from here.\n kernel_size=2,\n stride=2,\n )\n\n self.up_conv_3 = double_conv(256,128) # because the previous output channels are combined with the cropped image\n\n\n# Four\n self.up_trans_4 = nn.ConvTranspose2d(\n in_channels=128,\n out_channels=64, # Only half of the 1024 channel comes from here.\n kernel_size=2,\n stride=2,\n )\n\n self.up_conv_4 = double_conv(128,64) # because the previous output channels are combined with the cropped image\n\n# Output Layer\n self.out = nn.Conv2d(\n in_channels = 64,\n out_channels=2, # Given in the paper\n kernel_size=1 # check??\n )\n\n\n\n\n \n\n def forward(self, image):\n # encoder \n # bs,c,h,w\n x1 = self.down_conv_1(image)\n #print(\"Start of Encoder Size: \",x1.size())\n x2 = self.max_pool_2x2(x1)\n x3 = self.down_conv_2(x2)\n x4 = self.max_pool_2x2(x3)\n x5 = self.down_conv_3(x4)\n x6 = self.max_pool_2x2(x5)\n x7 = self.down_conv_4(x6)\n x8 = self.max_pool_2x2(x7)\n x9 = self.down_conv_5(x8)\n #print(\"End of Encoder Size: \", x9.size())\n #print(\"Cropped test: \", x9.size()[2])\n\n # decoder\n x = self.up_trans_1(x9)\n y = crop_img(x7,x)\n x = self.up_conv_1(torch.cat([x,y],axis=1))\n\n\n x = self.up_trans_2(x)\n y = crop_img(x5,x)\n x 
= self.up_conv_2(torch.cat([x,y],axis=1))\n\n x = self.up_trans_3(x)\n y = crop_img(x3,x)\n x = self.up_conv_3(torch.cat([x,y],axis=1))\n\n\n x = self.up_trans_4(x)\n y = crop_img(x1,x)\n x = self.up_conv_4(torch.cat([x,y],axis=1))\n\n x = self.out(x)\n print(x.size())\n return x\n \n\n\n\n\n\n\n #print(\"Upsampling Output\",x.size())\n # remove the below comments for understanding cropping process\n #print(\"Original image size\",x7.size()) # Original image is cropped to match up sampling image\n #print(\"Cropped image\",y.size()) \n\n\nif __name__ == \"__main__\":\n image = torch.rand((1,1,572,572)) #(bs,c,h,w)\n model = UNet()\n print(model(image))\n \n\n\n ","repo_name":"Vsanku01/PyTorch-Projects","sub_path":"Paper Implementations/UNet paper/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7810200106","text":"#!/usr/bin/python\n# ...\n# Andres Osorio - aosorio@uniandes.edu.co\n# ...\n\nimport os,sys\nimport string\nfrom optparse import OptionParser\n#-----------------------------------------------------\n\nparser = OptionParser()\nparser.add_option(\"-d\", type = \"string\", dest=\"dir\",\n help=\"DIR\", metavar=\"DIR\" )\n\n\n(options, args) = parser.parse_args()\n\nif options.dir is None:\n parser.error(\"please give a directory\")\n\n#-----------------------------------------------------\n\nwidth='width=1.3\\\\textwidth'\nangle='angle=0'\ncaption='UCT2015'\n\npath = options.dir\n\noutput = open('output.tex','w')\n\nscript = ''\nscript += ' \\\\documentclass{article}\\n'\nscript += ' \\\\usepackage{graphicx}\\n'\nscript += ' \\\\begin{document}\\n'\nscript += ' \\\\pagestyle{empty}\\n'\nscript += '\\n'\n\ncmd = 'find '+ path + ' -name \\\"*.eps\\\"'\nfiles = os.popen(cmd,'r').readlines()\n\nfor f in files:\n file = f[:-1]\n\n insertfile = '\\\\includegraphics[' + width + ',' + angle + ']{' + file + '}\\n'\n\n full_path = file.split('/')\n \n caption = full_path[-4] + ':' + full_path[-3] + ':'\n fname = full_path[-1].replace('_','-')\n caption += fname\n script += '\\\\clearpage\\n' \n script += '\\\\begin{figure}[!ht]\\n' \n script += '\\\\centering\\n' \n script += insertfile\n script += '\\\\caption{' + caption + '}\\n'\n script += '\\\\end{figure}\\n' \n script += '\\n'\n\nscript += '\\\\end{document}'\n\noutput.writelines( script )\n\noutput.close()\n","repo_name":"andres0sorio/UCTWork","sub_path":"L1Upgrade/Stage1/makePDF.py","file_name":"makePDF.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18591036931","text":"class Solution:\n def findDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) <= 0:\n return -1\n \n slow = nums[0]\n fast = nums[nums[0]]\n while slow != fast:\n slow = nums[slow]\n fast = nums[nums[fast]]\n \n entry = nums[0]\n slow = nums[slow]\n while slow != entry:\n entry = nums[entry]\n slow = nums[slow]\n \n return entry\n \n","repo_name":"PingHGao/leetcode","sub_path":"py3_solution/287_FindtheDuplicateNumber.py","file_name":"287_FindtheDuplicateNumber.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31790979911","text":"\"\"\"\n스택 (stack)은 기본적인 자료구조 중 하나로, 컴퓨터 프로그램을 작성할 때 자주 이용되는 개념이다.\n스택은 자료를 넣는 (push) 입구와 자료를 뽑는 (pop) 입구가 같아 제일 나중에 들어간 자료가 제일 먼저 나오는 (LIFO, Last in First out) 
특성을 가지고 있다.\n\n1부터 n까지의 수를 스택에 넣었다가 뽑아 늘어놓음으로써, 하나의 수열을 만들 수 있다.\n이때, 스택에 push하는 순서는 반드시 오름차순을 지키도록 한다고 하자.\n임의의 수열이 주어졌을 때 스택을 이용해 그 수열을 만들 수 있는지 없는지, 있다면 어떤 순서로 push와 pop 연산을 수행해야 하는지를 알아낼 수 있다.\n이를 계산하는 프로그램을 작성하라.\n\"\"\"\n\nimport sys\n\nN = int(sys.stdin.readline())\narr = [int(sys.stdin.readline()) for _ in range(N)]\n\nstart, idx = 1, 0\nstack = [start]\nresult = ['+']\ncount = 0\nflag = True\nwhile count < N:\n if start > 2*N:\n flag = False\n break\n\n if stack and arr[idx] == stack[-1]:\n stack.pop()\n result.append('-')\n idx += 1\n count += 1\n continue\n start += 1\n stack.append(start)\n result.append('+')\n\nif flag:\n for sign in result:\n print(sign)\nelse:\n print(\"NO\")\n","repo_name":"sangm1n/problem-solving","sub_path":"BOJ/1874.py","file_name":"1874.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9419021942","text":"\"\"\"\nSortowanie. Trzy dowolne liczby podane przez użytkownika zapisz do trzech zmiennych.\nZnajdź największą liczbę. Wyświetl liczby od największej do najmniejszej.\n\n\"\"\"\nnumberList = [0, 0, 0]\nfor i in range(len(numberList)):\n numberList[i] = int(input(\"Write number:..\"))\nnumberList.sort()\nprint('\\nMaximum number:', max(numberList))\nfor x in range(1, len(numberList)+1):\n print(numberList[-x], \" \", end=\"\")\n","repo_name":"gekogit/pythonCourse","sub_path":"03-control/if08.py","file_name":"if08.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42256739559","text":"#-*- coding:utf-8 -*-\n\nimport maya.cmds as cmds\nimport re\nimport sys\nimport mtoa.aovs\n\nfrom YM_ReferenceOperations import *\nfrom YM_AOVOperator import *\n\ndef referenceCreatorWindow():\n if cmds.window('ReferenceCreatorWindow',ex=True):\n cmds.deleteUI('ReferenceCreatorWindow')\n \n cmds.window('ReferenceCreatorWindow',t='YM_ReferenceOperatorWindow',w=400,h=400)\n cmds.columnLayout()\n \n cmds.rowLayout(nc=6)\n cmds.checkBox('Sets_ReferenceCheckBox',l='Sets',v=0)\n cmds.checkBox('Props_ReferenceCheckBox',l='Props',v=1)\n cmds.checkBox('Chars_ReferenceCheckBox',l='Chars',v=1)\n cmds.button('YM_CreateRenderLayer_Separate',bgc=[0.1,0.2,0.3],l='Separate',w=70)\n cmds.button('YM_CreateRenderLayer_Combine',bgc=[0.15,0.25,0.35],l='Combine',w=70)\n cmds.button('YM_AOVOperation',bgc=[0.35,0.25,0.15],l='AOV Operation',w=100)\n \n cmds.setParent(u=True)\n \n cmds.separator(w=400,h=2,style='none',bgc=[0.0,0.2,0.35])\n cmds.rowLayout(nc=6)\n cmds.checkBox('Mesh_ReferenceCheckBox',l='Mesh',v=1)\n cmds.checkBox('Curve_ReferenceCheckBox',l='nurbsCurve',v=1)\n cmds.checkBox('Particle_ReferenceCheckBox',l='Particle',v=1)\n cmds.checkBox('Fluid_ReferenceCheckBox',l='Fluid',v=1)\n #cmds.checkBox('Light_ReferenceCheckBox',l='Light',v=1)\n cmds.setParent(u=True)\n cmds.textScrollList('YM_ReferenceWindow_List',w=400,h=500,ams=True)\n \n cmds.setParent(u=True)\n cmds.showWindow('ReferenceCreatorWindow')\n\n cmds.checkBox('Sets_ReferenceCheckBox',e=True,cc=lambda *args:refreshReferenceList())\n cmds.checkBox('Props_ReferenceCheckBox',e=True,cc=lambda *args:refreshReferenceList())\n cmds.checkBox('Chars_ReferenceCheckBox',e=True,cc=lambda *args:refreshReferenceList())\n cmds.button('YM_CreateRenderLayer_Separate',e=True,c=lambda *args:createRenderLayer(True))\n cmds.button('YM_CreateRenderLayer_Combine',e=True,c=lambda *args:createRenderLayer(False))\n 
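# bind the AOV button to the AOV operator window provided by YM_AOVOperator\n    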
cmds.button('YM_AOVOperation',e=True,c=lambda *args:AOVOperator_Window())\n refreshReferenceList()\n\ndef refreshReferenceList():\n cmds.textScrollList('YM_ReferenceWindow_List',e=True,ra=True)\n for reference in getLoadedReferenceList():\n if cmds.checkBox('Sets_ReferenceCheckBox',q=True,v=True) and checkReferenceTypeByPath(cmds.referenceQuery(reference,filename=True),'([s|S]ets){1}'):\n cmds.textScrollList('YM_ReferenceWindow_List',e=True,a=reference)\n if cmds.checkBox('Props_ReferenceCheckBox',q=True,v=True) and checkReferenceTypeByPath(cmds.referenceQuery(reference,filename=True),'([p|P]rops){1}'):\n cmds.textScrollList('YM_ReferenceWindow_List',e=True,a=reference)\n if cmds.checkBox('Chars_ReferenceCheckBox',q=True,v=True) and checkReferenceTypeByPath(cmds.referenceQuery(reference,filename=True),'([c|C]hars){1}'):\n cmds.textScrollList('YM_ReferenceWindow_List',e=True,a=reference)\n\ndef getFormatFilename(filepath):\n filename = re.search('([^<>/\\\\\\|:\"\"\\*\\?]+)\\.\\w+$',filepath)\n extensionName = re.search('(\\.\\w+){0,}$',filename.string[filename.start():filename.end()])\n pureName = filename.string[filename.start():filename.end()-len(extensionName.string[extensionName.start():extensionName.end()])]\n filenameList = pureName.split('_')\n return filenameList\n\ndef findRenderableObjects(referenceNode):\n renderableObjectTypes = list()\n if cmds.checkBox('Mesh_ReferenceCheckBox',q=True,v=True):\n renderableObjectTypes.append('mesh')\n if cmds.checkBox('Curve_ReferenceCheckBox',q=True,v=True):\n renderableObjectTypes.append('nurbsCurve')\n if cmds.checkBox('Particle_ReferenceCheckBox',q=True,v=True):\n renderableObjectTypes.append('particle')\n if cmds.checkBox('Fluid_ReferenceCheckBox',q=True,v=True):\n renderableObjectTypes.append('fluid')\n #if cmds.checkBox('Light_ReferenceCheckBox',q=True,v=True):\n # renderableObjectTypes.append('light')\n \n renderableObjects = list()\n referenceNodes = cmds.referenceQuery(referenceNode,nodes=True,dp=True)\n if len(referenceNodes) == 0:\n return ['']\n for objectType in renderableObjectTypes:\n for shape in referenceNodeTypeFilter(referenceNodes,objectType):\n if cmds.objExists(shape) is False:\n continue\n if cmds.getAttr(shape+'.intermediateObject') == 1:\n continue\n if cmds.getAttr(shape+'.visibility') == 0:\n continue\n if cmds.getAttr(shape+'.template') == 1:\n continue\n renderableObjects.append(shape)\n return renderableObjects\n\ndef createRenderLayer(trigger):\n if cmds.textScrollList('YM_ReferenceWindow_List',q=True,si=True) is None:\n cmds.warning('Select Some Reference Name!!!')\n return\n\n sceneAOVs = cmds.ls(type='aiAOV')\n \n if trigger is True:\n for reference in cmds.textScrollList('YM_ReferenceWindow_List',q=True,si=True):\n #Get reference's RenderLayer Node and give a hint with already layered reference\n count = 0\n for renderLayer in referenceNodeTypeFilter(cmds.referenceQuery(reference,nodes=True,dp=True),'renderLayer'):\n if re.search('(defaultRenderLayer){1,}',renderLayer) is None:\n if cmds.confirmDialog( title='Continue?', message=reference + ' has some renderLayers already, Continue?', button=['Yes','No'], defaultButton='Yes', cancelButton='No', dismissString='No' ) == 'No':\n count = 1\n continue\n if count > 0:\n continue\n\n renderableShapes = cmds.listRelatives(findRenderableObjects(reference),p=True,f=True)\n\n if len(renderableShapes) == 0:\n continue\n #Get the reference filename \n filenameList = getFormatFilename(cmds.referenceQuery(reference,filename=True,wcn=True))\n\n #Get reference's aiAOV Node\n 
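# their enabled state is toggled per render layer below\n            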
referenceAOVs = referenceNodeTypeFilter(cmds.referenceQuery(reference,nodes=True),'aiAOV')\n \n referenceLayer = cmds.createRenderLayer(e=True,n=filenameList[1],noRecurse=True)\n cmds.editRenderLayerMembers(referenceLayer,renderableShapes,nr=True)\n\n #Switch to defaultRenderLayer and set the reference AOVs to disabled\n cmds.editRenderLayerGlobals(crl='defaultRenderLayer')\n \n for aov in referenceAOVs:\n cmds.setAttr(aov+'.enabled',0)\n\n #Switch to new Layer and set the relative AOVs to enabled , the unrelative AOVs to disabled\n cmds.editRenderLayerGlobals(crl=referenceLayer)\n for aov in sceneAOVs:\n cmds.editRenderLayerAdjustment(aov+'.enabled',layer=referenceLayer)\n if aov not in referenceAOVs:\n cmds.setAttr(aov+'.enabled',0)\n else:\n cmds.setAttr(aov+'.enabled',1)\n \n else:\n renderableObjects = list()\n referenceAOVs = list()\n for reference in cmds.textScrollList('YM_ReferenceWindow_List',q=True,si=True):\n for parent in cmds.listRelatives(findRenderableObjects(reference),p=True,f=True):\n renderableObjects.append(parent)\n\n for aov in referenceNodeTypeFilter(cmds.referenceQuery(reference,nodes=True),'aiAOV'):\n referenceAOVs.append(aov)\n\n if len(renderableObjects) == 0:\n return\n \n referenceLayer = cmds.createRenderLayer(e=True,n='ChangeMyName',noRecurse=True)\n cmds.editRenderLayerMembers(referenceLayer,renderableObjects,nr=True)\n \n cmds.editRenderLayerGlobals(crl='defaultRenderLayer')\n for aov in referenceAOVs:\n cmds.setAttr(aov+'.enabled',0)\n \n cmds.editRenderLayerGlobals(crl=referenceLayer)\n for aov in sceneAOVs:\n cmds.editRenderLayerAdjustment(aov+'.enabled',layer=referenceLayer)\n if aov not in referenceAOVs:\n cmds.setAttr(aov+'.enabled',0)\n else:\n cmds.setAttr(aov+'.enabled',1)\n\n #print renderableObjects\n\n\n\n\n","repo_name":"onexeno/Maya_Scripts","sub_path":"YM_Lib/common/YM_RenderLayerOperator_v03.py","file_name":"YM_RenderLayerOperator_v03.py","file_ext":"py","file_size_in_byte":8120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19990821564","text":"from django.urls import path\nfrom . 
import views\nfrom django.views.decorators.csrf import csrf_exempt\n\nurlpatterns = [\n    path('api/shows', views.ShowsList.as_view(), name='shows_list'),\n    path('api/shows/<int:pk>', views.ShowsDetail.as_view(), name='shows_detail'),\n    path('api/users', views.UsersList.as_view(), name='users_list'),\n    path('api/users/<int:pk>', views.UsersDetail.as_view(), name='users_detail'),\n    path('api/users/login', csrf_exempt(views.check_login), name=\"check_login\") #routes api/users/login to check_login function for auth\n]\n","repo_name":"kcastillo90/tv-zone-back","sub_path":"tv_zone/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"13267817540","text":"import pytest\nimport torch\n\nfrom quant.common.parser import get_base_argument_parser, parse_config\n\n\n@pytest.fixture()\ndef base_parser():\n    \"\"\"Fixture for base argument parser.\"\"\"\n    return get_base_argument_parser('base parser')\n\n\ndef test_standard_args(base_parser):\n    \"\"\"Test parsing standard arguments.\"\"\"\n    args = base_parser.parse_args('--config examples/mnist/mnist_fp.yaml'.split(' '))\n    config = parse_config(args)\n\n    assert isinstance(config['experiment_name'], str) and len(config['experiment_name'])\n    assert config['environment']['platform'] == 'local'\n    assert config['environment']['ngpus'] == (1 if torch.cuda.is_available() else 0)\n    assert 'init_from_checkpoint' not in config\n    assert 'restore_experiment' not in config\n    assert not config['skip_training']\n\n\ndef test_missing_config(base_parser):\n    \"\"\"Test missing config.\"\"\"\n    args = base_parser.parse_args([])\n    with pytest.raises(ValueError):\n        parse_config(args)\n\n\ndef test_gpu_override(base_parser):\n    \"\"\"Test CLI ngpus argument can override what is in the config.\"\"\"\n    args = base_parser.parse_args('--config examples/mnist/mnist_fp.yaml --ngpus 8'.split(' '))\n    config = parse_config(args)\n\n    assert config['environment']['ngpus'] == 8\n","repo_name":"yangelides3/testDependency","sub_path":"tests/common/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33034534632","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 27 20:54:49 2020\r\n\r\n@author: stnav\r\n\"\"\"\r\n\r\n##simplified model of Roly Poly Toy\r\ndef simplified():\r\n    import numpy as np\r\n    from numpy import sin, cos\r\n    import matplotlib.pyplot as plt\r\n    from math import pi\r\n    import matplotlib.animation as animation\r\n    import matplotlib.patches as patches\r\n\r\n\r\n    R = 0.5\r\n    m = 250\r\n    h = 3/8*R\r\n    Ic = 83/320*m*R**2\r\n    g = 9.81\r\n\r\n    def f(r,t):\r\n        theta = r[0]\r\n        ftheta = r[1]\r\n#        fftheta = -(h*m*sin(theta)*(g+R*ftheta**2))/(Ic+m*(R**2+h**2-2*h*R*cos(theta)))\r\n        fftheta = -(h*m*sin(theta)*(g+R*ftheta**2)+2*h*m*R*sin(theta))/(Ic+m*(R**2+h**2-2*h*R*cos(theta)))\r\n        return np.array([ftheta,fftheta], float)\r\n\r\n    tmin = 0.0\r\n    tmax = 10.0\r\n    N = 400 \r\n\r\n    theta0 = 60/180*pi\r\n    ftheta0 = 0\r\n\r\n    step = (tmax-tmin)/N\r\n    t_pts = np.arange(tmin,tmax,step)\r\n    theta_pts = []\r\n    ftheta_pts = []\r\n    r = np.array([theta0, ftheta0], float)\r\n    for t in t_pts:\r\n        theta_pts.append(r[0])\r\n        ftheta_pts.append(r[1])\r\n        k1 = step*f(r,t)\r\n        k2 = step*f(r+0.5*k1,t+0.5*step)\r\n        k3 = step*f(r+0.5*k2,t+0.5*step)\r\n        k4 = step*f(r+k3,t+step)\r\n        r += (k1+2*k2+2*k3+k4)/6 \r\n\r\n    hc_pts = R - 
h*cos(theta_pts)\r\n    xo = np.array(theta_pts)*(-R)\r\n    yo = np.ones(N)*R\r\n\r\n    \r\n    plt.figure(1, figsize=(6,4))\r\n    plt.title(\"Angle vs. t\")\r\n    plt.ylabel(r\"$\\theta(t)$\")\r\n    plt.xlabel(r\"$t$\")\r\n    plt.plot(t_pts,theta_pts,'*-')\r\n\r\n    plt.figure(2, figsize=(6,4))\r\n    plt.title(\"Angular velocity vs. t\")\r\n    plt.ylabel(r\"$\\dot{\\theta}(t)$\")\r\n    plt.xlabel(r\"$t$\")\r\n    plt.plot(t_pts,ftheta_pts,'*-')\r\n\r\n    plt.figure(3, figsize=(6,4))\r\n    plt.title(\"Height of cm vs. t\")\r\n    plt.ylabel(r\"$h_c(t)$\")\r\n    plt.xlabel(r\"$t$\")\r\n    plt.plot(t_pts,hc_pts,'*-')\r\n    \r\n    fig = plt.figure(4)\r\n    ax = fig.add_subplot(111, autoscale_on=False, ylim=[-1,R+1], xlim=[-(R+1),(R+1)], title=\"Animation of Roly Poly Toy\")\r\n    ax.set_aspect('equal')\r\n    ax.grid()\r\n\r\n#    line, = ax.plot([], [], 'o-', lw=2)\r\n    time_template = 'time = %.1fs'\r\n    time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)\r\n    \r\n    w1 = patches.Wedge((xo[0],yo[0]), R, 180+theta_pts[0]/pi*180, theta_pts[0]/pi*180,color='b')\r\n\r\n    def init():\r\n        time_text.set_text('')\r\n        ax.plot([-10,10],[0,0],'-',color='black')\r\n        ax.add_patch(w1)\r\n        return w1,time_text\r\n\r\n\r\n    def animate(i):\r\n        w1.set_center((xo[i],yo[i]))\r\n        w1.theta1 = 180 + theta_pts[i]/pi*180\r\n        w1.theta2 = theta_pts[i]/pi*180\r\n        time_text.set_text(time_template % (i*step))\r\n        return w1,time_text\r\n    \r\n    ani = animation.FuncAnimation(fig, animate, range(1, N),\r\n                                  interval=step*1000, blit=True, init_func=init)\r\n    ani.save('RolyPoly_simplified.mp4')\r\n#    plt.show()\r\n    \r\nsimplified()","repo_name":"rstanuwijaya/Roly-Poly","sub_path":"Simplified.py","file_name":"Simplified.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33680710387","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# @Time : 2018-04-10 15:02\n# @Author : PengZhw\n\n# @Software: PyCharm\n\nimport math\n\n\nclass Point:\n    \"\"\"\n    A point object that only stores its coordinates and a refresh method.\n    \"\"\"\n\n    def __init__(self, x: float, z: float, y: float):\n        self.x = x\n        self.z = z\n        self.y = y\n        self.coordinate = [self.x, self.y, self.z]\n\n    def refresh(self):\n        self.coordinate = [self.x, self.y, self.z]\n\n\nclass Flat: # the plane is assumed to always be parallel to the XOZ plane\n    def __init__(self, p1: Point, p2: Point, p3: Point, p4: Point):\n        \"\"\"\n        The four points must be passed in adjacent order.\n\n        \"\"\"\n        self.y = (p1.y + p2.y + p3.y + p4.y) / 4 # the mean of the y coordinates is used as the plate's y\n        self.p1, self.p2, self.p3, self.p4 = p1, p2, p3, p4\n        self.p1.y, self.p2.y, self.p3.y, self.p4.y = self.y, self.y, self.y, self.y\n\n    def tell_corner(self):\n        self.p1.refresh()\n        self.p2.refresh()\n        self.p3.refresh()\n        self.p4.refresh()\n        print(\"The four vertices of the plane are:\", self.p1.coordinate, self.p2.coordinate, self.p3.coordinate, self.p4.coordinate)\n\n    def if_cover(self, p):\n        # use the law of cosines to decide whether the given point's projection lies inside the plate\n        def angle(p_a, p_b, p_c):\n            a = math.sqrt((p_b.x - p_c.x) ** 2 + (p_b.z - p_c.z) ** 2)\n            b = math.sqrt((p_c.x - p_a.x) ** 2 + (p_c.z - p_a.z) ** 2)\n            c = math.sqrt((p_b.x - p_a.x) ** 2 + (p_b.z - p_a.z) ** 2)\n            if (b ** 2 + c ** 2 - a ** 2) / (2 * b * c) > 1:\n                return math.acos(1)\n            elif (b ** 2 + c ** 2 - a ** 2) / (2 * b * c) < -1:\n                return math.acos(-1)\n            alpha = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))\n\n            return alpha\n\n        bete = angle(p, self.p1, self.p2) + angle(p, self.p2, self.p3) + angle(p, self.p3, self.p4) + angle(p, self.p4,\n                                                                                                           self.p1)\n        if round(bete, 5) == round(math.pi * 2, 5):\n            # print(round(bete, 5), round(math.pi * 2, 5))\n            return True\n        else:\n            # print(round(bete, 5), round(math.pi * 2, 5))\n            
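# the angle sum around p differs from 2*pi, so its projection falls outside the plate\n            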
return False\n\n\nclass Box: # every N graphene sheets yield N-1 Box instances, though in some cases a sheet is discarded\n    def __init__(self, flat_1: Flat, flat_2: Flat):\n        \"\"\"\n\n        :param flat_1: the plane with the smaller y\n        :param flat_2: the plane with the larger y\n        \"\"\"\n        if flat_1.y > flat_2.y:\n            self.flat_1, self.flat_2 = flat_2, flat_1\n        elif flat_1.y < flat_2.y:\n            self.flat_1, self.flat_2 = flat_1, flat_2\n\n    def if_inside(self, p):\n        # print(\"\\nReading point\", p.coordinate, \"...\")\n        if p.y > self.flat_2.y or p.y < self.flat_1.y: # if p's y coordinate is not between the two sheets, return False\n            # print(\"Point\", p.coordinate, \"has its y coordinate outside the two sheets.\")\n            return False\n        elif not self.flat_1.if_cover(p): # if the lower plane does not cover point p, return False\n            # print(\"The lower plane does not cover point\", p.coordinate, \".\")\n            return False\n\n        elif not self.flat_2.if_cover(p):\n            # print(\"The upper plane does not cover point\", p.coordinate, \".\")\n            return False\n        # print(\"Point\", p.coordinate, \"lies between the two sheets and its projection is covered by both planes.\")\n        return True # otherwise it can only be between the two sheets\n\n\nclass GrapheneC(Point):\n    def __init__(self, x, z, y, n, name):\n        Point.__init__(self, x, z, y)\n        self.n = n # n is the index of the graphene sheet this carbon belongs to\n        self.name = name # name (the global serial number in the pdb file)\n\n\nclass CO2C(Point):\n    def __init__(self, x, z, y, name, mol_n):\n        Point.__init__(self, x, z, y)\n        self.name = name # name (the global serial number in the pdb file)\n        self.mol_n = mol_n\n\n\nclass GraShp(Flat):\n    def __init__(self, c1, c2, c3, c4):\n        Flat.__init__(self, c1, c2, c3, c4)\n\n\ndef main():\n    print(\"==========================================\\nTest content begins\\n\")\n    grac = GrapheneC(0, 0, 0.2, 1, None)\n    print(grac.coordinate)\n\n    P1 = Point(0, 0, 0.2)\n    P2 = Point(3, 0, -0.1)\n    P3 = Point(3, 2, 0.1)\n    P4 = Point(0, 2, 0)\n\n    P5 = Point(1, 0, 2.8)\n    P6 = Point(2, 0, 3.1)\n    P7 = Point(2, 3, 3.4)\n    P8 = Point(0, 3, 2.9)\n\n    FLAT_1 = Flat(P1, P2, P3, P4)\n    FLAT_1.tell_corner()\n\n    FLAT_2 = Flat(P5, P6, P7, P8)\n    FLAT_2.tell_corner()\n\n    box = Box(FLAT_1, FLAT_2)\n    result = box.if_inside(Point(1.5, 1, 2))\n    print(result)\n\n    result_1 = box.if_inside(Point(0.1, 0, 0))\n    print(result_1)\n    print(\"\\nTest content ends\\n==========================================\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"AncientTree/for_simulation","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42259790017","text":"from algo.search import binary\nfrom algo.search.common import test_it\n\n\ndef find(lst, val):\n    if lst[0] == val:\n        return 0\n\n    i = 1\n    while i < len(lst) and lst[i] <= val:\n        i *= 2\n    return binary.find(lst, val, i // 2, min(i, len(lst)))\n\n\nif __name__ == '__main__':\n    test_it(find)\n","repo_name":"urm8/ads","sub_path":"algo/search/exponential.py","file_name":"exponential.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"35441605904","text":"class NestedIterator:\r\n    def __init__(self, nestedList: [NestedInteger]):\r\n        self.nestedList = nestedList\r\n        self.size = len(nestedList) if nestedList else 0\r\n        self.iterator = None\r\n        self.i = 0\r\n    \r\n    def next(self) -> int:\r\n        cur = self.nestedList[self.i]\r\n        if cur.isInteger():\r\n            self.i += 1\r\n            return cur.getInteger()\r\n        else: return self.iterator.next()\r\n    \r\n    def hasNext(self) -> bool:\r\n        while self.i < self.size:\r\n            cur = self.nestedList[self.i]\r\n            if cur.isInteger(): return True\r\n            else:\r\n                if self.iterator == None:\r\n                    self.iterator = NestedIterator(cur.getList())\r\n                if self.iterator.hasNext(): return True\r\n                else: \r\n                    self.iterator = None\r\n                    self.i += 1\r\n        return 
False\r\n\r\n\"\"\"\r\nA solution from a top commenter; it has its merits, so I pasted it here.\r\nThe key point is self.iterator; the solution is essentially recursion on the class itself.\r\n\"\"\"","repo_name":"kikihiter/LeetCode2","sub_path":"Everyday/No341s.py","file_name":"No341s.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"12751609058","text":"import pickle\nimport docx\n\nfor ds in ['lap', 'open']:\n    doc = docx.Document()\n    doc.add_heading('Table 2', 0)\n    table = doc.add_table(rows=1, cols=6)\n\n    row = table.rows[0].cells\n    row[0].text = 'Algo Name'\n    row[1].text = \"Task\"\n    row[2].text = 'Accuracy'\n    row[3].text = 'Specificity'\n    row[4].text = 'Sensitivity'\n    row[5].text = 'AUC'\n\n    with open(f'{ds}_test_results.pkl', 'rb') as f:\n        while True:\n            try:\n                a = pickle.load(f)\n                tar_ = a.__dict__[\"prediction_target\"]\n                keys_ = a.__dict__['folds'][0].__dict__\n                row = table.add_row().cells\n                row[0].text = a.__dict__['folds'][0].__dict__[\"trained_classifier\"].__class__.__name__\n                row[1].text = tar_\n                row[2].text = str(\"{:.2f}\".format(a.acc_avg())) # accuracy\n                row[3].text = str(\"{:.2f}\".format(a.specificity_avg())) # specificity\n                row[4].text = str(\"{:.2f}\".format(a.sensitivity_avg())) # sensitivity\n                row[5].text = str(\"{:.2f}\".format(a.auc_avg())) # auc\n            except EOFError:\n                break\n    table.style = 'Medium Grid 3 Accent 1'\n    doc.save(f\"results/{ds}_table2.docx\")\n","repo_name":"isears/NIS-CR-ComplicationPredictor","sub_path":"table2.py","file_name":"table2.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"19515643647","text":"import utime\nfrom machine import RTC\n\nrtc = RTC()\nyear = 2019\nmonth = 8\nday = 28\nhour = 19\nminute = 57\nsecond = 0\nsubsecond = 0\n\n# update internal RTC\nrtc.datetime((year, month, day, 0, hour, minute, second, subsecond))\n\n# generate formatted date/time strings from internal RTC\n #date_str = \"{:02}/{:02}/{:4}\".format(rtc.datetime()[1], rtc.datetime()[2], rtc.datetime()[0])\n #time_str = \"{:02}:{:02}:{:02}\".format(rtc.datetime()[4], rtc.datetime()[5], rtc.datetime()[6])\n\n\"\"\"\n TODO: in \"main\": use utime.localtime() to fetch the real time.\n print(utime.localtime())\n date_str = \"{:02}/{:02}/{:4}\".format(utime.localtime()[1], utime.localtime()[2], utime.localtime()[0])\n time_str = \"{:02}:{:02}:{:02}\".format(utime.localtime()[3], utime.localtime()[4], utime.localtime()[5])\n write a class with a def update() that refreshes date_str and time_str\n also:\n import time\n print(time.localtime())\n\"\"\"\n\n#End\n","repo_name":"pascal1062/micropython","sub_path":"esp32/ioboard-test-2/rtclock.py","file_name":"rtclock.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"41584578582","text":"import sys\nsys.path.append(\"module\")\nfrom prints import *\n\nimport random\n\nfile = open(\"texts/Words.txt\",\"r\")\nWords = file.readlines()\nfile.close()\nfile = open(\"texts/Learnings.txt\",\"a\")\nRandomIndex = random.randint(0,len(Words)-1) # randint's upper bound is inclusive, so subtract 1 to avoid an IndexError on pop\ntheWord = Words.pop(RandomIndex)\noutputMessage(theWord)\nsoftwareMessage(\"Do you want to add this word to your Learning file? 
(y or n) : \")\nLearningChoose = input(\"\")\nif LearningChoose == \"y\":\n    file.write(theWord+\"\\n\")\n    file.close()\n    file = open(\"texts/Words.txt\",\"w\")\n    file.writelines(Words)\n    file.close()\n    softwareMessage(\"Successfully added to Learning file\")\nelif LearningChoose == \"n\":\n    file.close()\n    softwareMessage(\"Word wasn't added\")\nelse:\n    errorMessage(\"An error occurred\")\n","repo_name":"theHapalua/Learn-3000-Word","sub_path":"Words.py","file_name":"Words.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"31049715535","text":"import time\nimport telebot\nfrom deeppavlov import build_model\nfrom deeppavlov.core.common.file import read_json\n\nbot = telebot.TeleBot('5678654199:AAGqvTEHAdXE3mPdkDbc-x6kMkN5Pvqop4w')\n\nmodel_config = read_json('squad_ru_bert_infer.json')\nmodel = build_model(model_config, download=True)\n\nfile_name = \"C:\\\\Users\\\\amir1\\\\PycharmProjects\\\\ChatBot\\\\pyshkin.txt\"\naccess_mode = \"r\"\ntext = open(file_name, access_mode, encoding=\"utf8\")\nprint(model(['DeepPavlov is library for NLP and dialog systems.'], ['What is DeepPavlov?']))\n\n\n\n# @bot.message_handler(commands=['start'])\n# def start(message):\n#     bot.send_message(message.chat.id, \"Добро пожаловать!\")\n#\n#\n# @bot.message_handler(commands=[\"raz\"])\n# def raz(message):\n#     bot.send_message(message.chat.id, text=\"Раз Два Три\")\n\n\n\n\n\n@bot.message_handler(content_types=['text'])\ndef get_text_messages(message):\n    # print(message.text)\n    # for xer in message.items():\n    #     if xer['text'] == 'asdasdasd':\n    #         print(xer)\n    #         break\n    bot.send_message(message.from_user.id, model(text, message.text))\n    # if message.text == \"Привет\":\n    #     bot.send_message(message.from_user.id, \"Здравствуй\")\n    # elif message.text == '/help':\n    #     bot.send_message(message.from_user.id, \"Напиши 'Привет'\")\n    # else:\n    #     bot.send_message(message.from_user.id, \"Я Вас не понял, напишите /help.\")\n\n\n# bot.polling(none_stop=True)\n#\n# print(\"Bot listening\")\n#\n\nprint(\"Bot listening\")\n\nwhile True:\n    try:\n        bot.polling(none_stop=True)\n    except Exception as e:\n        print(e)\n        time.sleep(15)\n","repo_name":"Tragidra/Telegramm-bot-1","sub_path":"rr.py","file_name":"rr.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22807108817","text":"#!/usr/bin/env python3\n\n#Write a function that walks a list of strings printing the length\n#of each one. What happens if you pass an integer to len()?\n\nlista = [\"uno\",\"dos\",\"tres\",\"cuatro\"]\nlistaint = [1,2,3,4]\ndef Recorrer (lista):\n    for item in lista:\n        print (len(item))\n\nRecorrer (lista)\n\n#If it is a list of integers, it says that type int has no length\n","repo_name":"DamianNery/Tecnicas-De-Programacion","sub_path":"4Abril/27/RecorrerLista.py","file_name":"RecorrerLista.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"854885004","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains the package interface to the demonstration class\n\"Spreadsheet\".\n\nEach package interface will be a subclass of the PackageInterface and certain\nattributes and methods must be defined for these to become \"concrete\".\n\nThe attribute to be defined is called \"id_map\". 
This is the mapping from\nthe internal data catalog to names used locally by the module for inputs.\n\nThere are 3 methods that also must be defined, as follows:\n\nThe methods \"declare_inputs\" and \"declare_outputs\" declare which variables from\nthe internal data catalog are used as inputs and which are provided as outputs.\n\nThe method \"connect\" is used to execute the function. Inputs can be collected\nusing the method \"get_local\" using the local variable name as the argument.\nThe outputs should be stored using the method \"set_local\", where the local\nvariable name and the data must be given.\n\nNote:\n    The function decorators (such as \"@classmethod\", etc) must not be removed.\n\n.. module:: demo\n   :platform: Windows\n   :synopsis: Aneris interface for dtocean_dummy package\n   \n.. moduleauthor:: Mathew Topper \n\"\"\"\n\n\nfrom aneris.boundary.interface import MapInterface, MaskVariable\nfrom dtocean_dummy import Spreadsheet\n\nclass DemoInterface(MapInterface):\n    \n    '''Class of interfaces for the purposes of this test.\n    '''\n\n\nclass TableInterface(DemoInterface):\n    \n    '''Interface to the Spreadsheet class of dtocean_dummy, providing a table\n    of random numbers.\n    \n    '''\n    \n    @classmethod\n    def get_name(cls):\n        \n        return \"Spreadsheet Generator\"\n\n    @classmethod \n    def declare_inputs(cls):\n        \n        '''Declare all the variables required as inputs by this interface.\n\n        Returns:\n          list: List of internal variables names required as inputs.\n        \n        '''\n\n        input_list = [MaskVariable('demo:demo:low',\n                                   'trigger.bool',\n                                   [True]),\n                      MaskVariable('demo:demo:high',\n                                   'trigger.bool',\n                                   [True]),\n                      'demo:demo:rows'\n                      ]\n        \n        return input_list\n\n    @classmethod \n    def declare_outputs(cls):\n        \n        '''Declare all the variables provided as outputs by this interface.\n        \n        Returns:\n          list: List of internal variables names provided as outputs.\n        '''\n        \n        output_list = ['demo:demo:table',\n                       ]\n        \n        return output_list\n    \n    @classmethod \n    def declare_optional(cls):\n        \n        optional_list = ['demo:demo:low',\n                         'demo:demo:high',\n                         ]\n        \n        return optional_list\n    \n    @classmethod\n    def declare_id_map(cls):\n        \n        '''Declare the mapping between the internal variable names and local\n        variable names.\n        \n        Returns:\n          dict: Dictionary mapping of variable names, each entry being of the\n            form \"'local_name' : 'internal:name'\".\n        '''\n        \n        id_map = {'low': 'demo:demo:low',\n                  'high': 'demo:demo:high',\n                  'rows': 'demo:demo:rows',\n                  'table': 'demo:demo:table'}\n        \n        return id_map\n    \n    def connect(self):\n        \n        '''This function is used to extract the data from the interfacing\n        package.\n        \n        Note: methods get_local and set_local are used to get the inputs and\n        provide the outputs to and from the interface.\n        '''\n        \n        rows = self.data.rows\n        \n        # Build optional data\n        config = {}\n        \n        if self.data.low is not None: config[\"low\"] = self.data.low\n        if self.data.high is not None: config[\"high\"] = self.data.high\n        \n        sheet = Spreadsheet(**config)\n\n        sheet.make_table(rows)\n        table_data = sheet.table.to_dict()\n        \n        self.data.table = table_data\n        \n        return\n        \nclass LaterInterface(DemoInterface):\n    \n    '''Interface to test outputs generated later than table interface\n    \n    '''\n    \n    @classmethod\n    def get_name(cls):\n        \n        return \"Later Interface\"\n\n    @classmethod \n    def declare_inputs(cls):\n        \n        '''Declare all the variables required as inputs by this interface.\n\n        Returns:\n          list: List of internal variables names required as inputs.\n        \n        '''\n\n        input_list = ['demo:demo:rows']\n        \n        return input_list\n\n    @classmethod \n    def 
declare_outputs(cls):\n \n '''Declare all the variables provided as outputs by this interface.\n \n Returns:\n list: List of internal variables names provided as outputs.\n '''\n \n output_list = ['demo:demo:table',\n ]\n \n return output_list\n \n @classmethod \n def declare_optional(cls):\n \n return None\n \n @classmethod \n def declare_id_map(cls):\n\n id_map = {'rows': 'demo:demo:rows',\n 'table': 'demo:demo:table'}\n \n return id_map\n \n def connect(self):\n \n return\n","repo_name":"DTOcean/aneris","sub_path":"tests/interface_plugins/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12007229659","text":"import matplotlib.pyplot as plt\nimport pry\n\n\ndef plot():\n with open(\"./../slurm_output/slurm-37891.out\", \"r\") as f:\n content = f.readlines()\n loss = []\n PSNR = []\n iteration = []\n for i, val in enumerate(content):\n if val[:7] == \"[TRAIN]\":\n x = val.split()\n for k, data in enumerate(x):\n if data == \"Iter:\":\n iteration.append(int(x[k+1]))\n elif data == \"Loss:\":\n loss.append(float(x[k+1][:-1]))\n elif data == \"PSNR:\":\n PSNR.append(float(x[k+1]))\n fig = plt.figure()\n plt.subplot(2, 1, 1)\n plt.plot(iteration, loss)\n plt.ylabel(\"Loss\")\n plt.subplot(2, 1, 2)\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"PSNR\")\n plt.plot(iteration, PSNR, \"red\", linewidth=1)\n plt.savefig(\"Loss-PSNR.png\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n plot()","repo_name":"zen1405/Nerf-SFM-P3","sub_path":"Phase2/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9168238540","text":"\nimport cv2\nimport numpy as np\nimport os,sys\nimport math as m\nimport pandas as pd\n\ndef showLinkedTracks(filename, start, frames, outputfilename):\n \n\n linkedDF = pd.read_csv('../classify/output.csv') \n \n \n \n\n \n cap = cv2.VideoCapture(filename)\n cap.set(cv2.CAP_PROP_POS_FRAMES,start)\n \n S = (1920,1080)\n out = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 2, S, True)\n \n for tt in range(frames):\n \n # Capture frame-by-frame\n _, frame = cap.read()\n if (tt%15) > 0 : continue\n thisFrame = linkedDF.ix[linkedDF['frame']==tt]\n\n \n # draw detected objects and display\n sz=6\n \n for i, row in thisFrame.iterrows():\n #if int(row['particle'])!=628:\n # continue\n \n #cv2.putText(frame ,str(int(row['particle'])) ,((int(row['x'])+12, int(row['y'])+12)), cv2.FONT_HERSHEY_SIMPLEX, 0.8,255,2)\n cv2.rectangle(frame, ((int( row['x'])-sz, int( row['y'])-sz)),((int( row['x'])+sz, int( row['y'])+sz)),(0,0,0),2)\n \n cv2.imshow('frame',frame)\n out.write(frame)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n \n\n cv2.destroyAllWindows()\n cap.release()\n out.release()\n\nif __name__ == '__main__':\n FULLNAME ='/home/ctorney/data/wildebeest/test.avi'# 0 3600\n\n # sys.argv[1]\n frameStart = 0\n frameLength = 3600# int(sys.argv[3])\n path, filename = os.path.split(FULLNAME)\n noext, ext = os.path.splitext(filename)\n allTransforms=np.zeros((frameLength,3))\n outputfilename = noext + '.csv' \n showLinkedTracks(FULLNAME, frameStart, frameLength, outputfilename)\n","repo_name":"cirrus-project/wildTrack","sub_path":"visualize/showTracks.py","file_name":"showTracks.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"22044682205","text":"class Product:\n    def __init__(self, name=\"\", nutrition_values=[0], tabu_time=1, weight_resolution=1, min_weight=1,\n                 max_weight=10, weight=1):\n        self.name = name\n        self.nutrition_values = nutrition_values\n        self.tabu_time = tabu_time\n        self.weight_resolution = weight_resolution\n        self.min_weight = min_weight\n        self.max_weight = max_weight\n        if min_weight > max_weight:\n            self.max_weight = min_weight\n            self.min_weight = max_weight\n        self.correct_weight(weight)\n        self.general_product = False\n\n    # helper that repairs the weight so that it stays within the allowed bounds\n    def correct_weight(self, weight):\n        if self.min_weight <= weight <= self.max_weight:\n            self.weight = weight\n        elif weight < self.min_weight:\n            self.weight = self.min_weight\n        else:\n            self.weight = self.max_weight\n\n    # computes the maximum number of weight units that will not exceed the demand\n    # for the nutrient with index nutrition_index and weight nutrition_weight\n    def get_max_weight(self,nutrition_index,nutrition_weight):\n        if self.min_weight*self.weight_resolution*self.nutrition_values[nutrition_index] <= nutrition_weight:\n            for i in range(self.max_weight,self.min_weight -1, -1):\n                if i*self.weight_resolution*self.nutrition_values[nutrition_index] <= nutrition_weight :\n                    return i\n        else:\n            return 0\n\n","repo_name":"amalkows/alhe_project","sub_path":"src/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43021856261","text":"\"\"\"\nRoll the dice to decide what to do\n\nVersion: 0.1\nAuthor: Luo Hao\nDate: 2018-02-28\n\"\"\"\nfrom random import randint\n\nface = randint(1, 6)\nif face == 1:\n    result = 'clicker'\nelif face == 2:\n    result = 'dance'\nelif face == 3:\n    result = 'Learn to bark'\nelif face == 4:\n    result = 'Do push-ups'\nelif face == 5:\n    result = 'recite the tongue twister'\nelse:\n    result = 'tell a bad joke'\nprint(result)","repo_name":"ag143/python","sub_path":"Python-100-Days-master/Day01-15/code/Day03/rolldice.py","file_name":"rolldice.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24581863301","text":"\"\"\"\r\nThis is a set up script for py2exe\r\n\r\nUSAGE: python setup-win py2exe\r\n\r\n\"\"\"\r\n\r\nfrom distutils.core import setup\r\nimport matplotlib as mpl\r\nimport py2exe\r\n\r\nimport os\r\n\r\n# Remove the build folder, a bit slower but ensures that build contains the latest\r\nimport shutil\r\nshutil.rmtree(\"build\", ignore_errors=True)\r\nshutil.rmtree(\"dist\", ignore_errors=True)\r\n\r\n\r\nINCLUDES = [\r\n    \"sip\",\r\n    \"serial\",\r\n    \"scipy.special._ufuncs_cxx\",\r\n    ]\r\n\r\nEXCLUDES = ['_tkagg',\r\n            '_ps',\r\n            '_fltkagg',\r\n            'Tkinter',\r\n            'Tkconstants',\r\n            '_cairo',\r\n            '_gtk',\r\n            'gtkcairo',\r\n            'pydoc',\r\n            'sqlite3',\r\n            'bsddb',\r\n            'curses',\r\n            'tcl',\r\n            '_wxagg',\r\n            '_gtagg',\r\n            '_cocoaagg',\r\n            '_wx']\r\n\r\n\r\n# current version of Artisan\r\n\r\nimport artisanlib\r\n\r\nVERSION = artisanlib.__version__\r\nLICENSE = 'GNU General Public License (GPL)'\r\n\r\ncwd = os.getcwd()\r\n\r\nDATAFILES = mpl.get_py2exe_datafiles()\r\nDATAFILES = DATAFILES + \\\r\n    [('plugins\imageformats', [\r\n    'c:\Python27\Lib\site-packages\PyQt4\plugins\imageformats\qsvg4.dll',\r\n    'c:\Python27\Lib\site-packages\PyQt4\plugins\imageformats\qgif4.dll',\r\n    
'c:\\Python27\\Lib\\site-packages\\PyQt4\\plugins\\imageformats\\qtiff4.dll',\r\n 'c:\\Python27\\Lib\\site-packages\\PyQt4\\plugins\\imageformats\\qjpeg4.dll',\r\n ]),\r\n ('plugins\\iconengines', [\r\n 'c:\\Python27\\Lib\\site-packages\\PyQt4\\plugins\\iconengines\\qsvgicon4.dll',\r\n ]),\r\n ]\r\n\r\nsetup(\r\n name =\"Artisan\",\r\n version=VERSION,\r\n author='YOUcouldbeTOO',\r\n author_email='zaub.ERASE.org@yahoo.com',\r\n license=LICENSE,\r\n windows=[{\"script\" : cwd + \"\\\\artisan.py\",\r\n \"icon_resources\": [(0, cwd + \"\\\\artisan.ico\")]\r\n }],\r\n data_files = DATAFILES,\r\n zipfile = \"lib\\library.zip\",\r\n options={\"py2exe\" :{\r\n \"packages\": ['matplotlib','pytz'],\r\n \"compressed\": False, # faster\r\n \"unbuffered\": True,\r\n 'optimize': 2,\r\n \"bundle_files\": 2, # default bundle_files: 3 breaks WebLCDs on Windows\r\n \"dll_excludes\":[\r\n 'MSVCP90.dll','tcl84.dll','tk84.dll','libgdk-win32-2.0-0.dll',\r\n 'libgdk_pixbuf-2.0-0.dll','libgobject-2.0-0.dll',\r\n 'MSVCR90.dll','MSVCN90.dll','mwsock.dll','powrprof.dll'],\r\n \"includes\" : INCLUDES,\r\n \"excludes\" : EXCLUDES}\r\n }\r\n )\r\n\r\nos.system(r'copy README.txt dist')\r\nos.system(r'copy LICENSE.txt dist')\r\nos.system(r'copy qt-win.conf dist\\\\qt.conf')\r\nos.system(r'mkdir dist\\\\Wheels')\r\nos.system(r'mkdir dist\\\\Wheels\\\\Cupping')\r\nos.system(r'mkdir dist\\\\Wheels\\\\Other')\r\nos.system(r'mkdir dist\\\\Wheels\\\\Roasting')\r\nos.system(r'copy Wheels\\\\Cupping\\\\* dist\\\\Wheels\\\\Cupping')\r\nos.system(r'copy Wheels\\\\Other\\\\* dist\\\\Wheels\\\\Other')\r\nos.system(r'copy Wheels\\\\Roasting\\\\* dist\\\\Wheels\\\\Roasting')\r\nos.system(r'mkdir dist\\\\translations')\r\nos.system(r'copy translations\\\\*.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_de.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_es.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_fr.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_sv.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_zh_CN.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_zh_TW.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_ko.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_pt.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_ru.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_ar.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_ja.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_hu.qm dist\\\\translations')\r\nos.system(r'copy c:\\\\Qt\\\\4.8.6\\\\translations\\\\qt_pl.qm dist\\\\translations')\r\nos.system(r'rmdir /q /s dist\\\\mpl-data\\\\sample_data')\r\n# YOCTO HACK BEGIN: manually copy over the dlls\r\nos.system(r'mkdir dist\\\\lib')\r\nos.system(r'copy c:\\\\Python27\\\\Lib\\\\site-packages\\\\yoctopuce\\\\cdll\\\\yapi.dll dist\\\\lib')\r\nos.system(r'copy c:\\\\Python27\\\\Lib\\\\site-packages\\\\yoctopuce\\\\cdll\\\\yapi64.dll dist\\\\lib')\r\n# YOCTO HACK END\r\nos.system(r'copy artisan.png dist')\r\nos.system(r'copy artisanAlarms.ico dist')\r\nos.system(r'copy artisanProfile.ico dist')\r\nos.system(r'copy artisanPalettes.ico dist')\r\nos.system(r'copy artisanWheel.ico dist')\r\nos.system(r'copy 
includes\\\\Humor-Sans.ttf dist')\r\nos.system(r'copy includes\\\\alarmclock.eot dist')\r\nos.system(r'copy includes\\\\alarmclock.svg dist')\r\nos.system(r'copy includes\\\\alarmclock.ttf dist')\r\nos.system(r'copy includes\\\\alarmclock.woff dist')\r\nos.system(r'copy includes\\\\artisan.tpl dist')\r\nos.system(r'copy includes\\\\bigtext.js dist')\r\nos.system(r'copy includes\\\\jquery-1.11.1.min.js dist')\r\nos.system(r'copy ..\\\\vcredist_x86.exe dist')\r\n","repo_name":"send2vinnie/artisan","sub_path":"setup-win.py","file_name":"setup-win.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"35374514617","text":"num = int(input(\"enter the number = \"))\r\nstore = num\r\nnum2 = 0 \r\ni = 1\r\nwhile(num!=0):\r\n    i = num%10\r\n    num2 = num2*10 + i\r\n    num = num//10\r\n    \r\nif(num2 == store):\r\n    print(store , \"is a palindrome number \")\r\nelse:\r\n    print(store , \"is not a palindrome number\") \r\n","repo_name":"abhayg76/pythonprob_atulya","sub_path":"QUESTION 11.py","file_name":"QUESTION 11.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16027311004","text":"# Why Did the Cow Cross the Road 5, Silver 2, prefix sums\n# repair so that there exist k consecutive working traffic lights\n\nfrom sys import stdin\n\nn, k, b = map(int, stdin.readline().split())\na = [0 for _ in range(n+1)]\npsum = [0 for _ in range(n+1)]\nfor i in range(b):\n    h = int(stdin.readline())\n    a[h] = 1\n\nfor i in range(1, n+1):\n    psum[i] = psum[i-1] + a[i]\n\nans = b\nfor j in range(k, n+1):\n    ans = min(ans, psum[j] - psum[j-k])\n\nprint(ans)","repo_name":"lookinmin/CodingTest","sub_path":"누적합/BOJ_14465.py","file_name":"BOJ_14465.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12896449594","text":"from collections import Counter\n\n\nclass Solution:\n    def originalDigits(self, s: str) -> str:\n        # approach\n        \n        # create a list of numbers 0-9 in english\n        \n        # zero - number of z's since it's the only one that has a z\n        # one - number of o's minus counts for others with an o: zero, two, four\n        # two - number of w's\n        # three - number of t's minus counts for others with a 't': two and eight\n        # four - number of u's\n        # five - number of f's minus count for others with f: four\n        # six - number of x's\n        # seven - number of s's minus count for others with s: six\n        # eight - number of g's\n        # nine - number of i's minus count for others with i: eight, six, five\n        \n        # build {'a': 1, 'b': 2, 'c': 3}\n        lookup = Counter(s)\n        \n        result = \"\"\n        result += \"0\"*(lookup['z'])\n        result += \"1\"*(lookup['o'] - lookup['z'] - lookup['w'] - lookup['u'])\n        result += \"2\"*(lookup['w'])\n        result += \"3\"*(lookup['t'] - lookup['w'] - lookup['g'])\n        result += \"4\"*(lookup['u'])\n        result += \"5\"*(lookup['f'] - lookup['u'])\n        result += \"6\"*(lookup['x'])\n        result += \"7\"*(lookup['s'] - lookup['x'])\n        result += \"8\"*(lookup['g'])\n        result += \"9\"*(lookup['i'] - lookup['g'] - lookup['x'] - (lookup['f'] - lookup['u']))\n        \n        return result\n        \n        \n        ","repo_name":"peter-lucia/leetcode","sub_path":"problems/reconstruct_original_digits_from_english/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"5197599653","text":"import modules.funcoes as app\n\noriginal_img = 
r'img\\amostras\\AmostraAleatoria.jpg'\nimage_path = r'imagem_redimensionada.jpg'\n\nparams = app.params = {\n        'gradient_ksize': 1, # kernel size for computing the gradient\n        'threshold_value': 20, # threshold value for binarization\n        'markers_ksize': 4, # kernel size for computing the markers\n        'contraste': 1.4,\n        'brilho': 0.0\n    }\n\ngradiente = app.show_gradient_magnitude(image_path,contraste=None,brilho=None)\nsegmentada = app.watersheed_image(image_path,params)\n#imagem = app.carregar_imagem(original_img, largura_padrao=680)\n\n\n'''\n# Use for the sample img\\amostras\\AmostraReferencia.jpg:\n\nparams = app.params = {\n        'gradient_ksize': 1, # kernel size for computing the gradient\n        'threshold_value': 10, # threshold value for binarization\n        'markers_ksize': 1, # kernel size for computing the markers\n        'contraste': 1.6,\n        'brilho': 30.0\n    }\n'''","repo_name":"gabrslen/app_img_plantas","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12997690345","text":"import math\n\nN, W, H = map(int, input().split())\n\nc = math.sqrt(W**2 + H**2)\n\nfor _ in range(N):\n    length = int(input())\n\n    if length <= c:\n        print('DA')\n    else:\n        print('NE')","repo_name":"JLMadsen/Kattis","sub_path":"sibice.py","file_name":"sibice.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"10333395903","text":"from typing import List\n\n# noinspection PyUnresolvedReferences\nfrom expression.expr.arithmetic import (\n    Constant,\n    Term,\n    Add,\n    Neg,\n    Sub,\n    Mul,\n    Div,\n)\n\n\ndef parse(s: str):\n    s = s.replace(\"(\", \" ( \")\n    s = s.replace(\")\", \" ) \")\n    while \"  \" in s:\n        s = s.replace(\"  \", \" \")\n    s = s.strip()\n    tokens = list(reversed(s.split(\" \")))\n    assert tokens\n    term = parse_add_sub(tokens)\n    assert not tokens\n    return term\n\n\ndef parse_terminal(tokens) -> Term:\n    token = tokens.pop(0)\n    if token.isnumeric():\n        return Constant(int(token))\n    elif token == \")\":\n        term = parse_add_sub(tokens)\n        token = tokens.pop(0)\n        assert token == \"(\"\n        return term\n    else:\n        assert False\n\n\ndef parse_neg(tokens) -> Term:\n    term = parse_terminal(tokens)\n    if tokens and tokens[0] in \"~\":\n        tokens.pop(0)\n        return Neg(term)\n    else:\n        return term\n\n\ndef parse_mul_div(tokens: List[str]) -> Term:\n    term = parse_neg(tokens)\n    if tokens and tokens[0] in \"*/\":\n        token = tokens.pop(0)\n        if token == \"*\":\n            return Mul(parse_mul_div(tokens), term)\n        else:\n            return Div(parse_mul_div(tokens), term)\n    else:\n        return term\n\n\ndef parse_add_sub(tokens: List[str]) -> Term:\n    term = parse_mul_div(tokens)\n    if tokens and tokens[0] in \"+-\":\n        token = tokens.pop(0)\n        if token == \"+\":\n            return Add(parse_add_sub(tokens), term)\n        else:\n            return Sub(parse_add_sub(tokens), term)\n    else:\n        return term\n","repo_name":"smythi93/expression","sub_path":"src/expression/expr/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"28679984065","text":"import os\n\nfrom django.core.management import call_command, CommandError\nfrom django.test import SimpleTestCase\nfrom django.test.utils import override_settings\nfrom django.utils import translation\nfrom django.utils._os import upath\nfrom django.utils.six import StringIO\n\ntest_dir 
= os.path.abspath(os.path.dirname(upath(__file__)))\n\n\nclass MessageCompilationTests(SimpleTestCase):\n\n def setUp(self):\n self._cwd = os.getcwd()\n self.addCleanup(os.chdir, self._cwd)\n os.chdir(test_dir)\n\n def rmfile(self, filepath):\n if os.path.exists(filepath):\n os.remove(filepath)\n\n\nclass PoFileTests(MessageCompilationTests):\n\n LOCALE = 'es_AR'\n MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE\n\n def test_bom_rejection(self):\n with self.assertRaises(CommandError) as cm:\n call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())\n self.assertIn(\"file has a BOM (Byte Order Mark)\", cm.exception.args[0])\n self.assertFalse(os.path.exists(self.MO_FILE))\n\n\nclass PoFileContentsTests(MessageCompilationTests):\n # Ticket #11240\n\n LOCALE='fr'\n MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE\n\n def setUp(self):\n super(PoFileContentsTests, self).setUp()\n self.addCleanup(os.unlink, os.path.join(test_dir, self.MO_FILE))\n\n def test_percent_symbol_in_po_file(self):\n call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())\n self.assertTrue(os.path.exists(self.MO_FILE))\n\n\nclass PercentRenderingTests(MessageCompilationTests):\n # Ticket #11240 -- Testing rendering doesn't belong here but we are trying\n # to keep tests for all the stack together\n\n LOCALE='it'\n MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE\n\n def setUp(self):\n super(PercentRenderingTests, self).setUp()\n self.addCleanup(os.unlink, os.path.join(test_dir, self.MO_FILE))\n\n @override_settings(LOCALE_PATHS=(os.path.join(test_dir, 'locale'),))\n def test_percent_symbol_escaping(self):\n from django.template import Template, Context\n call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())\n with translation.override(self.LOCALE):\n t = Template('{% load i18n %}{% trans \"Looks like a str fmt spec %% o but shouldn\\'t be interpreted as such\" %}')\n rendered = t.render(Context({}))\n self.assertEqual(rendered, 'IT translation contains %% for the above string')\n\n t = Template('{% load i18n %}{% trans \"Completed 50%% of all the tasks\" %}')\n rendered = t.render(Context({}))\n self.assertEqual(rendered, 'IT translation of Completed 50%% of all the tasks')\n\n\n@override_settings(LOCALE_PATHS=(os.path.join(test_dir, 'locale'),))\nclass MultipleLocaleCompilationTests(MessageCompilationTests):\n MO_FILE_HR = None\n MO_FILE_FR = None\n\n def setUp(self):\n super(MultipleLocaleCompilationTests, self).setUp()\n localedir = os.path.join(test_dir, 'locale')\n self.MO_FILE_HR = os.path.join(localedir, 'hr/LC_MESSAGES/django.mo')\n self.MO_FILE_FR = os.path.join(localedir, 'fr/LC_MESSAGES/django.mo')\n self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_HR))\n self.addCleanup(self.rmfile, os.path.join(localedir, self.MO_FILE_FR))\n\n def test_one_locale(self):\n call_command('compilemessages', locale=['hr'], stdout=StringIO())\n\n self.assertTrue(os.path.exists(self.MO_FILE_HR))\n\n def test_multiple_locales(self):\n call_command('compilemessages', locale=['hr', 'fr'], stdout=StringIO())\n\n self.assertTrue(os.path.exists(self.MO_FILE_HR))\n self.assertTrue(os.path.exists(self.MO_FILE_FR))\n\n\nclass CompilationErrorHandling(MessageCompilationTests):\n\n LOCALE='ja'\n MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE\n\n def setUp(self):\n super(CompilationErrorHandling, self).setUp()\n self.addCleanup(self.rmfile, os.path.join(test_dir, self.MO_FILE))\n\n def test_error_reported_by_msgfmt(self):\n with self.assertRaises(CommandError):\n 
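# a malformed .po file must surface msgfmt's failure as a CommandError\n            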
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())\n","repo_name":"lumanjiao/XLS_BigData_Hue","sub_path":"desktop/core/ext-py/Django-1.6.10/tests/i18n/commands/compilation.py","file_name":"compilation.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"}
+{"seq_id":"32695166836","text":"import requests\nfrom Samplas.open_json import open_json\n\n\nSERVER_STATIC = \"http://static-maps.yandex.ru/1.x/\"\nSERVER_GEOCODE = \"http://geocode-maps.yandex.ru/1.x/\"\nSERVER_SEARCH = \"https://search-maps.yandex.ru/v1/\"\n\n\ndef geocode(geocode: str, sco=\"latlong\", kind=\"house\", format=\"json\"): # query\n    \"\"\"\n    :param geocode: str\n    :param sco: longlat - longitude, latitude; latlong - latitude, longitude\n    :param kind: house, street, metro, district, locality\n    :param format: json, xml\n    :return: dict\n    \"\"\"\n\n    apikey = \"40d1649f-0493-4b70-98ba-98533de7710b\"\n\n    geo_params = {\n        \"geocode\": geocode,\n        \"apikey\": apikey,\n        \"sco\": sco,\n        \"kind\": kind,\n        \"format\": format\n    }\n\n    response = requests.get(SERVER_GEOCODE, geo_params).json()\n    # open_json(response)\n    features = response[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n    return features\n\n\ndef get_coordinates(address):\n    toponym = geocode(address, sco=\"latlong\")\n    if not toponym:\n        return None, None\n\n    toponym_coordinates = toponym[\"Point\"][\"pos\"]\n\n    toponym_longitude, toponym_lattitude = map(float, toponym_coordinates.split(\" \"))\n\n    # Pack the coordinates into the ll parameter\n    ll = (toponym_longitude, toponym_lattitude)\n\n    # Bounding frame around the object:\n    envelope = toponym[\"boundedBy\"][\"Envelope\"]\n\n    # left, bottom, right and top bounds taken from the corner coordinates:\n    l, b = envelope[\"lowerCorner\"].split(\" \")\n    r, t = envelope[\"upperCorner\"].split(\" \")\n\n    # Compute the half-sizes vertically and horizontally\n    dx = abs(float(l) - float(r)) / 2.0\n    dy = abs(float(t) - float(b)) / 2.0\n\n    # Pack the sizes into the span parameter\n    span = (dx, dy)\n\n    return ll, span\n\n\ndef get_photo(point: str, spn=\"0.05,0.05\", type_photo=\"map\", mark=\"\"): # fetch the map image\n    request = f\"{SERVER_STATIC}?ll={point}&spn={spn}&l={type_photo}&pt={mark}\"\n    # print(request)\n    response = requests.get(request)\n    return response\n\n\ndef change_spn(spn: tuple, value: int) -> tuple: # change spn\n    coef = 1.1\n    if value < 0:\n        coef = 0.5\n    spn = (spn[0] + value * spn[0] * coef, spn[1] + value * spn[1] * coef)\n    spn = (spn[0] if spn[0] > 0.001 else 0.001, spn[1] if spn[1] > 0.001 else 0.001)\n    return spn\n\n\ndef change_ll(ll: tuple, spn: tuple, value: tuple): # change the coordinates\n    ll = ll[0] + spn[0] * value[0] * 2, ll[1] + spn[1] * value[1] * 2\n    return ll\n\n\nif __name__ == \"__main__\":\n    ...\n","repo_name":"PhoenixAtlanta/Maps_API","sub_path":"Samplas/geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43276228127","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name = \"br-nome-gen\",\n    version = \"1.3.0\",\n    author = \"Victor Williams Stafusa da Silva\",\n    author_email = \"victorwssilva@gmail.com\",\n    description = \"A generator of Brazilian typical names.\",\n    long_description = long_description,\n    long_description_content_type = \"text/markdown\",\n    url = 
\"https://github.com/victorwss/br-nome-gen\",\n    packages = setuptools.find_packages(),\n    classifiers = [\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n)","repo_name":"victorwss/br-nome-gen","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"3267180160","text":"# Unique Paths is a classic coding challenge where we have to calculate how many distinct paths a robot can take to reach its destination. \n# You can check this coding challenge on Leetcode\n\n# The robot moves on an m x n grid and can only move right and down. For instance, take a 3 x 2 grid and calculate the\n# paths.\n\n# input => m = 3, n = 2\n# output => 3\n\n# Time and space complexity to solve this challenge:\n# - Time complexity: I am going to iterate over every row and column, so it will be O(m * n).\n# - Space complexity: the same as the time complexity, since I have to keep the partial path counts in the matrix until I get the total number of paths\n\n# Lets code:\n\nclass solution():\n    def uniquepaths(self, m, n):\n        matrix = []\n        for i in range(m):\n            matrix.append([0]*n) # one row of n cells (not a hardcoded width)\n        for i in range(m):\n            matrix[i][0] = 1\n        for j in range(n):\n            matrix[0][j] = 1\n        for i in range(1,m):\n            for j in range(1,n):\n                matrix[i][j] = matrix[i][j-1] + matrix[i-1][j]\n        return matrix[m-1][n-1]\n    \nsolution().uniquepaths(3, 2)\n","repo_name":"densaiko/coding-challenge","sub_path":"unique paths.py","file_name":"unique paths.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34230581260","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\nimport mysql.connector as connect\nimport cv2\nimport os\nimport csv\nfrom tkinter import filedialog\n\nmydata = []\nclass Attendance:\n    def __init__(self,root):\n        self.root = root\n        self.root.geometry(\"1500x1000+0+0\")\n        self.root.title(\"Face recognition system\")\n        self.root.config(bg=\"#b0cbf7\")\n        \n        self.att_id_var = StringVar()\n        self.var_name = StringVar()\n        self.var_roll = StringVar()\n        self.var_date = StringVar()\n        self.var_time = StringVar()\n        self.var_dep = StringVar()\n        self.var_sts = StringVar()\n\n\n        # Image1\n        image = Image.open(\".\college_images\Stanford.jpg\")\n        img = image.resize((750,150),Image.ANTIALIAS)\n        self.photoImage = ImageTk.PhotoImage(img)\n\n        f_lbl = Label(self.root,image=self.photoImage)\n        f_lbl.place(x=0,y=0,height=150,width=750)\n\n        # Image 2\n        image1 = Image.open(\".\\\\college_images\\\\facialrecognition.png\")\n        img1 = image1.resize((750,150),Image.ANTIALIAS)\n        self.photoImage1 = ImageTk.PhotoImage(img1)\n\n        f_lbl1 = Label(self.root,image=self.photoImage1)\n        f_lbl1.place(x=750,y=0,height=150,width=750)\n\n        frames = Frame(self.root,bg=\"#0080ff\")\n        frames.place(x=0,y=150,width=1500,height=850)\n\n        # Text Heading\n        title_lbl = Label(frames,text=\"Attendance Management system\",font=\"Helvatica 35 bold\",bg=\"white\",fg=\"#0089ff\")\n        title_lbl.place(x=0,y=0,width=1500,height=55)\n\n        main_frame = Frame(frames,bd=2)\n        main_frame.place(x=10,y=65,width=1480,height=775)\n\n        # Left Label Frame\n        lft_lbl_frame = LabelFrame(main_frame,relief=RIDGE,text=\"Student Details\",font=\"Helvatica 15 bold\",bd=4)\n        lft_lbl_frame.place(x=10,y=10,width=730,height=755)\n\n        # Students Image\n        stu_img = 
Image.open(\".\\\\college_images\\\\stu.jpg\")\n img_stu = stu_img.resize((730,200),Image.ANTIALIAS)\n self.stu = ImageTk.PhotoImage(img_stu)\n\n f1_lbl = Label(lft_lbl_frame,image=self.stu)\n f1_lbl.place(x=10,y=0,width=705,height=220)\n\n left_inner = Frame(lft_lbl_frame,bd=4,relief=RIDGE)\n left_inner.place(x=10,y=230,width=705,height=380)\n\n # Attendance id\n att_id = Label(left_inner,text=\"Attendance Id :\",font=\"Helvatica 13 bold\")\n att_id.grid(row=0,column=0,padx=2,pady=10,sticky=W)\n\n att_id_entry = Entry(left_inner,width=12,font=\"Helvatica 13 bold\",textvariable=self.att_id_var)\n att_id_entry.grid(row=0,column=1,padx=2,pady=10,sticky=W)\n\n # Dept\n dept = Label(left_inner,text=\"Department :\",font=\"Helvatica 13 bold\")\n dept.grid(row=0,column=2,padx=2,pady=10,sticky=W)\n\n dept_entry = Entry(left_inner,width=12,font=\"Helvatica 13 bold\",textvariable=self.var_dep)\n dept_entry.grid(row=0,column=3,padx=2,pady=10,sticky=W)\n\n # name\n name = Label(left_inner,text=\"Name :\",font=\"Helvatica 13 bold\")\n name.grid(row=1,column=0,padx=2,pady=10,sticky=W)\n\n name_entry = Entry(left_inner,width=12,font=\"Helvatica 13 bold\",textvariable=self.var_name)\n name_entry.grid(row=1,column=1,padx=2,pady=10,sticky=W)\n\n # roll\n roll = Label(left_inner,text=\"Roll Number :\",font=\"Helvatica 13 bold\")\n roll.grid(row=1,column=2,padx=2,pady=10,sticky=W)\n\n roll_entry = Entry(left_inner,width=12,font=\"Helvatica 13 bold\",textvariable=self.var_roll)\n roll_entry.grid(row=1,column=3,padx=2,pady=10,sticky=W)\n\n # date\n date = Label(left_inner,text=\"Date :\",font=\"Helvatica 13 bold\")\n date.grid(row=2,column=0,padx=2,pady=10,sticky=W)\n\n date_entry = Entry(left_inner,width=12,font=\"Helvatica 13 bold\",textvariable=self.var_date)\n date_entry.grid(row=2,column=1,padx=2,pady=10,sticky=W)\n\n # time\n time = Label(left_inner,text=\"Time :\",font=\"Helvatica 13 bold\")\n time.grid(row=2,column=2,padx=2,pady=10,sticky=W)\n\n time_entry = Entry(left_inner,width=12,font=\"Helvatica 13 bold\",textvariable=self.var_time)\n time_entry.grid(row=2,column=3,padx=2,pady=10,sticky=W)\n\n sts_lbl = Label(left_inner,text=\"Attendance Status :\",font=\"Helvatica 15 bold\")\n sts_lbl.grid(row=4,column=0,padx=5,pady=10,sticky=W)\n\n sts_combo = ttk.Combobox(left_inner,font=\"Helvatica 15 bold\",textvariable=self.var_sts,width=17,state=\"readonly\")\n sts_combo['values'] = (\"Select the Status\",\"Present\",\"Absent\")\n sts_combo.current(0)\n sts_combo.grid(row=4,column=1,padx=5,pady=10,sticky=W)\n\n # Buttons Frame\n\n btn_frame = Frame(left_inner,bd=4,relief=RIDGE)\n btn_frame.place(x=15,y=250,width=672,height=50)\n\n # Save btn\n save_btn = Button(btn_frame,text=\"Import Csv\",width=18,font=\"Helvatica 15 bold\",bg=\"#0089ff\",fg=\"white\",command=self.importcsv)\n save_btn.grid(row=0,column=0)\n # Update btn\n upd_btn = Button(btn_frame,text=\"Export Csv\",width=18,font=\"Helvatica 15 bold\",bg=\"#003161\",fg=\"white\",command=self.exportcsv)\n upd_btn.grid(row=0,column=1)\n # Reset btn\n reset_btn = Button(btn_frame,text=\"Reset\",width=18,font=\"Helvatica 15 bold\",bg=\"#9d00ff\",fg=\"white\",command=self.reset)\n reset_btn.grid(row=0,column=3)\n\n\n # Right Label Frame\n rgt_lbl_frame = LabelFrame(main_frame,relief=RIDGE,text=\"Student Details\",font=\"Helvatica 15 bold\",bd=4)\n rgt_lbl_frame.place(x=750,y=10,width=720,height=755)\n\n # Students Image\n stu_img_rgt = Image.open(\".\\\\college_images\\\\stu2.jpg\")\n img_stu_rgt = stu_img_rgt.resize((705,200),Image.ANTIALIAS)\n 
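# keep a reference on the instance so Tkinter's image is not garbage-collected\n        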
self.stu_rgt = ImageTk.PhotoImage(img_stu_rgt)\n\n        f1_rgt_lbl = Label(rgt_lbl_frame,image=self.stu_rgt)\n        f1_rgt_lbl.place(x=5,y=0,width=705,height=220)\n\n        # Table Frame\n        table_frame = Frame(rgt_lbl_frame,bd=4,relief=RIDGE)\n        table_frame.place(x=5,y=220,width=705,height=500)\n\n        # Horizontal Scroller\n        scroll_x = Scrollbar(table_frame,orient=HORIZONTAL)\n\n        # Vertical Scroller\n        scroll_y = Scrollbar(table_frame,orient=VERTICAL)\n\n        self.attd = ttk.Treeview(table_frame,column=(\"id\",\"name\",\"dep\",\"date\",\"time\",\"attendance\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\n\n\n        scroll_x.pack(side=BOTTOM,fill=X)\n        scroll_x.config(command=self.attd.xview)\n        scroll_y.pack(side=RIGHT,fill=Y)\n        scroll_y.config(command=self.attd.yview)\n\n        self.attd.heading(\"id\",text=\"Attendance Id\")\n        self.attd.heading(\"name\",text=\"Name\")\n        self.attd.heading(\"dep\",text=\"Department\")\n        self.attd.heading(\"date\",text=\"Date\")\n        self.attd.heading(\"time\",text=\"Time\")\n        self.attd.heading(\"attendance\",text=\"Attendance\")\n        self.attd['show'] = \"headings\"\n\n        self.attd.column(\"id\",width=100)\n        self.attd.column(\"name\",width=100)\n        self.attd.column(\"dep\",width=100)\n        self.attd.column(\"date\",width=100)\n        self.attd.column(\"time\",width=100)\n\n\n        self.attd.pack(fill=BOTH,expand=1)\n        \n        # refresh the entry fields whenever a table row is selected\n        self.attd.bind(\"<ButtonRelease-1>\",self.get_cur)\n\n    def fetch_data(self,rows):\n        self.attd.delete(*self.attd.get_children())\n\n        for i in rows:\n            self.attd.insert(\"\",END,values=i)\n\n    def importcsv(self):\n        global mydata\n        mydata.clear()\n        fln = filedialog.askopenfilename(initialdir=os.getcwd(),title=\"Open CSV\",filetypes=((\"CSV files\",\".csv\"),(\"All Files\",\"*.*\")),parent=self.root)\n        with open(fln) as myfile:\n            csvread = csv.reader(myfile,delimiter=\",\")\n            for i in csvread:\n                mydata.append(i)\n        self.fetch_data(mydata)\n\n    def exportcsv(self):\n        try:\n            if len(mydata) < 1:\n                messagebox.showerror(\"No data\",\"No data found to export\",parent=self.root)\n                return False\n            fln = filedialog.asksaveasfilename(initialdir=os.getcwd(),title=\"Save CSV\",filetypes=((\"CSV files\",\".csv\"),(\"All Files\",\"*.*\")),parent=self.root)\n            with open(fln,mode='w',newline='') as myfile:\n                exp_write = csv.writer(myfile,delimiter=\",\")\n                for i in mydata:\n                    exp_write.writerow(i)\n                messagebox.showinfo(\"Data Exported\",\"Your data is successfully exported\")\n        except Exception as e:\n            messagebox.showerror(\"Error\",\"Due to :{}\".format(e),parent=self.root)\n\n    def get_cur(self,event=\"\"):\n        cur_row = self.attd.focus()\n        content = self.attd.item(cur_row)\n        rows = content['values']\n        self.att_id_var.set(rows[0])\n        self.var_name.set(rows[1])\n        self.var_dep.set(rows[2])\n        self.var_roll.set(rows[0])\n        self.var_date.set(rows[3])\n        self.var_time.set(rows[4])\n        self.var_sts.set(rows[5])\n\n    def reset(self):\n        self.att_id_var.set(\"\")\n        self.var_name.set(\"\")\n        self.var_dep.set(\"\")\n        self.var_roll.set(\"\")\n        self.var_date.set(\"\")\n        self.var_time.set(\"\")\n        self.var_sts.set(\"Select the Status\")\n\n\nif __name__ == \"__main__\":\n    root = Tk()\n    obj = Attendance(root)\n    root.mainloop() ","repo_name":"Swarup2608/student_system_with_face_recognition","sub_path":"attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28168305722","text":"# Task 1: Polynomial representation, function provided by the professor.\ndef poly_to_string(p_list):\n    \"\"\"\n    Return a string with a nice 
readable version of the polynomial given in p_list.\n \"\"\"\n terms = []\n degree = 0\n\n # Collect a list of terms\n for coeff in p_list:\n if degree == 0:\n terms.append(str(coeff))\n elif degree == 1:\n terms.append(str(coeff) + 'x')\n else:\n term = str(coeff) + 'x^' + str(degree)\n terms.append(term)\n degree += 1\n\n final_string = ' + '.join(terms) # The string ' + ' is used as \"glue\" between the elements in the string\n return final_string\n\n\n# Define two lists for evaluating the function poly_to_string\np = [2, 0, 1] # The evaluation of this list on poly_to_string should be equal to 2 + 0x + 1x^2\nq = [-2, 1, 0, 0, 1] # The evaluation of this list on poly_to_string should be equal to -2 + 1x + 0x^2 + 0x^3 + 1x^4\n\n\n# Task 2: Edit the function poly_to_string with some new features\ndef poly_to_string_improved(p_list):\n \"\"\"\n Return a string with a nice readable version of the polynomial given in p_list.\n \"\"\"\n if not p_list or all(coeff == 0 for coeff in p_list): # If the list is empty or all elements are 0, return 0\n return '0'\n terms = []\n degree = 0\n\n # Collect a list of terms\n for coeff in p_list:\n # If statement for coefficients that are 0\n if coeff == 0: # If coefficient is 0, continue to next iteration and increase degree by 1 (not added to list)\n degree += 1\n continue\n # If statement for coefficients that are 1\n if coeff == 1 and degree > 0: # If coefficient is 1 and degree is greater than 0, append x or x^degree\n if degree == 1:\n terms.append('x')\n else:\n term = 'x^' + str(degree)\n terms.append(term)\n # If statement for coefficients that are -1\n elif coeff == -1:\n if degree == 0: # If coefficient is -1 and degree is 0, append -1\n terms.append('-1')\n elif degree == 1: # If coefficient is -1 and degree is 1, append -x\n terms.append('-x')\n else: # If coefficient is -1 and degree is other than 1, append -x^degree\n term = '-x^' + str(degree)\n terms.append(term)\n # If statement for coefficients that are not 0, 1, -1\n else:\n if degree == 0: # If degree is 0, append coefficient\n terms.append(str(coeff))\n elif degree == 1: # If degree is 1, append coefficient*x\n terms.append(str(coeff) + 'x')\n else: # If degree is other than 1, append coefficient*x^degree\n term = str(coeff) + 'x^' + str(degree)\n terms.append(term)\n degree += 1 # Increase degree by 1\n\n final_string = ' + '.join(terms) # The string ' + ' is used as \"glue\" between the elements in the string\n return final_string\n\n\n# Task 3a: Create a function that drop zeros\ndef drop_zeros(p_list):\n \"\"\"\n Remove all zeros at the end the list p_list and return the result.\n \"\"\"\n while p_list and p_list[-1] == 0: # While p_list is not empty and the last element is 0, remove the last element\n p_list.pop() # Remove the last element\n return p_list\n\n\n# Task 3b: Create a function that checks if two polynomials are equal by ignoring zeros at the end of the list\ndef eq_poly(p_list, q_list):\n \"\"\"\n Return True if the polynomials p_list and q_list are equal, otherwise return False.\n \"\"\"\n p_list = drop_zeros(p_list) # Remove zeros at the end of p_list\n q_list = drop_zeros(q_list) # Remove zeros at the end of q_list\n if len(p_list) != len(q_list): # If the length of p_list and q_list are not equal, they are not equal (False)\n return False\n for i in range(len(p_list)): # Loop through the list\n if p_list[i] != q_list[i]: # If the i elements of p_list and q_list are not equal, they are not equal (False)\n return False\n return True # If the length of p_list and 
q_list are equal and all their elements are equal, the polynomials are equal, so return True\n\n\n# Task 4: Create a function that evaluates a polynomial at a given point\ndef eval_poly(p_list, x):\n    \"\"\"\n    Evaluate the polynomial given in p_list at the point x and return the result.\n    \"\"\"\n    result = 0\n    for i in range(len(p_list)):  # Loop through the list\n        result += p_list[i] * x ** i  # Add the i-th term, p_list[i] * x**i, to the running result\n    return result  # Return the result\n\n\n# Task 5a: Create a function that converts a polynomial to its negative form\ndef negate_poly(p_list):\n    \"\"\"\n    Negate the polynomial given in p_list.\n    \"\"\"\n    return [-coeff for coeff in p_list]  # Return the list with all elements multiplied by -1\n\n\n# Task 5b: Create a function that adds two polynomials\ndef add_poly(p_list, q_list):\n    \"\"\"\n    Add the polynomials p_list and q_list.\n    \"\"\"\n    result = []\n    for i in range(max(len(p_list), len(q_list))):  # Loop up to the length of the longer list\n        if i < len(p_list) and i < len(q_list):  # If i is inside the range of both p_list and q_list\n            result.append(p_list[i] + q_list[i])  # Add the elements of p_list and q_list and append to result list\n        elif i < len(p_list):  # If i is only inside the range of p_list, append the element of p_list\n            result.append(p_list[i])\n        else:  # Otherwise i is only inside the range of q_list, so append the element of q_list\n            result.append(q_list[i])\n    return drop_zeros(result)  # Return the result list without zeros at the end\n\n\n# Task 5c: Create a function that subtracts two polynomials\ndef sub_poly(p_list, q_list):\n    \"\"\"\n    Subtract the polynomial q_list from the polynomial p_list.\n    \"\"\"\n    return add_poly(p_list, negate_poly(q_list))  # Return the result of adding p_list and the negated q_list\n\n
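\n# A few illustrative checks of the functions above (an added demonstration, not part of the\n# original lab hand-in); the expected output is shown in the trailing comments:\nif __name__ == '__main__':\n    print(poly_to_string_improved([2, 0, 1]))         # 2 + x^2\n    print(poly_to_string_improved([-2, 1, 0, 0, 1]))  # -2 + x + x^4\n    print(eq_poly([1, 2, 0], [1, 2]))                 # True, trailing zeros are ignored\n    print(eval_poly([2, 0, 1], 3))                    # 11, i.e. 2 + 3^2\n    print(poly_to_string_improved(add_poly([1, 2], [1, -2, 3])))  # 2 + 3x^2\n","repo_name":"faerazo/programming-techniques","sub_path":"Lab 2/labb2.py","file_name":"labb2.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32764119780","text":"import asyncio\nfrom functools import wraps\nimport time\nfrom aioquiklua import QuikLuaClientBase, QuikLuaException, QuikLuaConnectionException, QuikLuaNoHistoryException\n\n\ndef timing(f):\n    @wraps(f)\n    async def wrap(*args, **kw):\n        ts = time.time()\n        result = await f(*args, **kw)\n        te = time.time()\n        print('func:%r args:[%r, %r] took: %2.4f sec' % (f.__name__, args, kw, te - ts))\n        return result\n\n    return wrap\n\n\nclass QuikLuaClientBenchmarker(QuikLuaClientBase):\n    @timing\n    async def test_heartbeat_sync(self, n_steps):\n        for i in range(n_steps):\n            await self.heartbeat()\n\n    @timing\n    async def test_heartbeat_async(self, n_steps):\n        await asyncio.gather(*[self.heartbeat() for i in range(n_steps)])\n    @timing\n    async def test_getClassesList_sync(self, n_steps):\n        for i in range(n_steps):\n            await self.rpc_call('getClassesList')\n\n    @timing\n    async def test_getClassesList_async(self, n_steps):\n        await asyncio.gather(*[self.rpc_call('getClassesList') for i in range(n_steps)])\n\n    @timing\n    async def test_historical_quotes_cached_async(self, n_steps):\n        await asyncio.gather(*[self.get_price_history('SPBFUT', 'RIH1', \"INTERVAL_M1\", use_caching=True) for i in range(n_steps)])\n\n    @timing\n    async def test_historical_quotes_cached_before_async(self, n_steps):\n        await asyncio.gather(*[self.get_price_history('SPBFUT', 'RIH1', \"INTERVAL_M1\", use_caching=True) for i in range(n_steps)])\n\n    @timing\n    async def test_historical_quotes_no_cache(self, n_steps):\n        await 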
asyncio.gather(*[self.get_price_history('SPBFUT', 'RIH1', \"INTERVAL_M1\", use_caching=False) for i in range(n_steps)])\n\n    async def main(self):\n        await self.initialize()\n        n_steps = 1000\n        await self.test_heartbeat_sync(n_steps)\n        await self.test_heartbeat_async(n_steps)\n        await self.test_getClassesList_sync(n_steps)\n        await self.test_getClassesList_async(n_steps)\n        await self.test_historical_quotes_cached_async(5)\n\n        # Let cache timeout pass\n        await asyncio.sleep(1)\n        await self.test_historical_quotes_cached_before_async(1)\n\n        await self.test_historical_quotes_no_cache(5)\n\n        # Clean up\n        await self.shutdown()\n\n\nif __name__ == '__main__':\n    qclient = QuikLuaClientBenchmarker(\"tcp://localhost:5560\", None)\n    # qclient = QuikLuaClientBenchmarker(\"tcp://localhost:5560\", \"tcp://localhost:5570\")\n    asyncio.run(qclient.main())\n","repo_name":"alexveden/quik-lua-async-client-python","sub_path":"examples/benchmarks.py","file_name":"benchmarks.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
{"seq_id":"3418285707","text":"from django import forms\nfrom .models import Categoria, SubCategoria, Marca, Um, Productos\n\n\nclass CategoriaForm(forms.ModelForm):\n    class Meta:\n        model = Categoria\n        # fields that appear on the form; either fields or exclude must be present\n        # fields lists exactly what is shown, while exclude shows everything except what is listed there\n        fields = ['descripcion', 'estado']\n        labels = {'descripcion':\"Descripción de la Categoria\",\n                  \"estado\": \"Estado\"}\n        widgets = {\"descripcion\": forms.TextInput}\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class':'form-control'\n            })\n\nclass SubCategoriaForm(forms.ModelForm):\n    # filtered here so that inactive categories are not shown\n    categoria = forms.ModelChoiceField(\n        queryset=Categoria.objects.filter(estado=True)\n        .order_by('descripcion')\n    )\n    # form for the subcategory\n    class Meta:\n        model = SubCategoria\n        fields = ['categoria','descripcion', 'estado']\n        labels = {'descripcion':\"SubCategoria\",\n                  \"estado\": \"Estado\"}\n        widgets = {\"descripcion\": forms.TextInput}\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class':'form-control'\n            })\n        self.fields['categoria'].empty_label = \"Seleccione Categoria\"\n\n\nclass MarcaForm(forms.ModelForm):\n    # filtered here so that inactive categories are not shown\n    # categoria = forms.ModelChoiceField(\n    #     queryset=Categoria.objects.filter(estado=True)\n    #     .order_by('descripcion')\n    # )\n    class Meta:\n        model = Marca\n        fields = ['descripcion', 'estado']\n        labels = {'descripcion':\"Nombre de la Marca\",\n                  \"estado\": \"Estado\"}\n        widgets = {\"descripcion\": forms.TextInput}\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class':'form-control'\n            })\n\n\nclass UmForm(forms.ModelForm):\n    # filtered here so that inactive categories are not shown\n    # categoria = forms.ModelChoiceField(\n    #     queryset=Categoria.objects.filter(estado=True)\n    #     .order_by('descripcion')\n    # )\n    class Meta:\n        model = Um\n        fields = ['descripcion', 'estado']\n        labels = {'descripcion':\"Unidad de Medida\",\n                  \"estado\": \"Estado\"}\n        widgets = {\"descripcion\": forms.TextInput}\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class':'form-control'\n            })\n\nclass ProductosForm(forms.ModelForm):\n    # filtered here so that inactive categories are not shown\n    # categoria = forms.ModelChoiceField(\n    #     queryset=Categoria.objects.filter(estado=True)\n    #     .order_by('descripcion')\n    # )\n\n    class Meta:\n        model = Productos\n        fields = [\n            'descripcion',\n            'estado',\n            'codigo',\n            'codigo_barra',\n            'precio',\n            'existencia',\n            'ultima_compra',\n            'marca',\n            'subcategoria',\n            'unidad_medida',\n            ]\n        labels = {\n            'descripcion':\"Producto\",\n            \"estado\": \"Estado\",\n            'codigo': 'Código',\n            'codigo_barra': 'Código de Barras',\n            'precio': 'Precio',\n            'existencia': 'Existencia',\n            'ultima_compra':'Ultima Compra',\n            'marca':'Marca',\n            'subcategoria':'SubCategoria',\n            'unidad_medida':'Unidad de Medida'\n        }\n        # fields that should be excluded\n        exclude = ['um','fm','uc','fc']\n        widgets = {\"descripcion\": forms.TextInput}\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in iter(self.fields):\n            self.fields[field].widget.attrs.update({\n                'class':'form-control'\n            })\n        self.fields['ultima_compra'].widget.attrs['readonly'] = True\n        self.fields['existencia'].widget.attrs['readonly'] = True","repo_name":"eze-fayu/sistema-facturacion","sub_path":"app/inv/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"71162111413","text":"import pymysql\n\nfrom DBUtils.PooledDB import PooledDB\n\n\nclass DbManager(object):\n    \n    def __init__(self, conn_args):\n        cmds = [\"set names utf8mb4;\"]\n        self._pool = PooledDB(pymysql, mincached=5, maxcached=20, setsession=cmds, **conn_args)\n    \n    def connection(self):\n        return self._pool.connection()\n\n    def execute_query(self, sql, as_dict=True):\n        \"\"\"\n        Run a SQL query and return the result rows.\n        :param sql: query string to execute\n        :param as_dict: when True, return each row as a dict keyed by column name\n        :return: rows as a tuple or list of dicts, or () on error\n        \"\"\"\n        conn = None\n        cur = None\n        try:\n            conn = self._pool.connection()\n            cur = conn.cursor()\n            cur.execute(sql)\n            rst = cur.fetchall()\n            if rst:\n                if as_dict:\n                    fields = [tup[0] for tup in cur._cursor.description]  # column names from the underlying cursor\n                    return [dict(zip(fields, row)) for row in rst]\n                return rst\n            return rst\n\n        except Exception as e:\n            print('sql:[{}] raised an error'.format(sql))\n            print(e.args[-1])\n            return ()\n        finally:\n            if conn:\n                conn.close()\n            if cur:\n                cur.close()\n    \n    def execute_many(self, sql, data):\n        \"\"\"\n        Execute one statement for many rows of data (e.g. a batch INSERT).\n        :param sql: parametrized statement to execute\n        :param data: sequence of parameter tuples\n        :return: True on success, False on error\n        \"\"\"\n        conn = None\n        cur = None\n        try:\n            conn = self._pool.connection()\n            cur = conn.cursor()\n            cur.executemany(sql, data)\n            conn.commit()\n            return True\n        except Exception as e:\n            print('[{}] raised an error'.format(sql))\n            print(e.args[-1])\n            if conn:\n                conn.rollback()\n            return False\n        finally:\n            if conn:\n                conn.close()\n            if cur:\n                cur.close()
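\n\n# A minimal usage sketch (all connection parameters and the table name below are placeholders,\n# adjust them to your own environment):\nif __name__ == '__main__':\n    db = DbManager({'host': 'localhost', 'port': 3306, 'user': 'root',\n                    'password': 'secret', 'database': 'blog', 'charset': 'utf8mb4'})\n    for row in db.execute_query('SELECT id, title FROM posts LIMIT 10'):\n        print(row)  # each row is a dict because as_dict defaults to True\n","repo_name":"BestUO/blog","sub_path":"back/src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"46870923634","text":"from puzzle import PuzzleContext\n\nDIRS = [\n    (i, j)\n    for i in [-1, 0, 1]\n    for j in [-1, 0, 1]\n    if not (i == 0 and j == 0)\n]\n\ndef is_inside(brd, i, j):\n    n = len(brd)\n    m = len(brd[0])\n    return i >= 0 and i < n and j >= 0 and j < m \n\ndef count_adjacent(brd, i, j, c):\n    cnt = 0\n    for di, dj in DIRS:\n        ii = di + i\n        jj = dj + j\n        if is_inside(brd, ii, jj) and brd[ii][jj] == c:\n            cnt += 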
1\n return cnt\n\n\ndef count_visible(brd, i, j, c):\n cnt = 0\n\n for di, dj in DIRS:\n ii = i + di\n jj = j + dj\n while is_inside(brd, ii, jj):\n if brd[ii][jj] != \".\":\n if brd[ii][jj] == c:\n cnt += 1\n break\n ii += di\n jj += dj\n\n return cnt\n\n\ndef simulate(brd, count_neighs, rules):\n new_brd = [[c for c in r] for r in brd]\n n = len(brd)\n m = len(brd[0])\n for i in range(n):\n for j in range(m):\n n_occupied = count_neighs(brd, i, j, \"#\")\n new_brd[i][j] = rules[brd[i][j]](n_occupied)\n\n new_brd = [\"\".join(r) for r in new_brd]\n changed = brd != new_brd\n return new_brd, changed\n\n\nwith PuzzleContext(year=2020, day=11) as ctx:\n # Part 1\n brd = ctx.nonempty_lines\n while True:\n brd, changed = simulate(brd, count_adjacent, rules={\n \"L\": lambda n_occupied: \"#\" if n_occupied == 0 else \"L\",\n \"#\": lambda n_occupied: \"L\" if n_occupied >= 4 else \"#\",\n \".\": lambda _: \".\",\n })\n if not changed:\n break\n\n ctx.submit(1, \"\".join(brd).count(\"#\"))\n\n # Part 2\n brd = ctx.nonempty_lines\n while True:\n brd, changed = simulate(brd, count_visible, rules={\n \"L\": lambda n_occupied: \"#\" if n_occupied == 0 else \"L\",\n \"#\": lambda n_occupied: \"L\" if n_occupied >= 5 else \"#\",\n \".\": lambda _: \".\",\n })\n if not changed:\n break\n\n ctx.submit(2, \"\".join(brd).count(\"#\"))\n","repo_name":"vstrimaitis/aoc-2020","sub_path":"11/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30605881675","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.semi_supervised import LabelPropagation\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nimport copy\nimport matplotlib.pyplot as plt\n\n\ndata = pd.read_csv('breast-cancer-wisconsin_cleaned.csv')\ndata.to_csv()\n#\n# scaler = StandardScaler()\n\nY = data[['Class']]\nX = data.drop('Class', axis=1)\n\n\n# X_std = scaler.fit_transform(X)\n# X_pd = pd.DataFrame(X_std, columns=X.columns)\n# X = X_pd\n\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\ntestsize_list = []\ntrain_socres_list = []\ntest_scores_list = []\n\ndef semi_shuffle_estimator(n_splits=10, test_size=0.6, seed=0, gamma=4, n_neighbors=6, max_iter=1000):\n sss = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)\n i = 0\n testsize_list.append(test_size)\n train_scores = []\n test_scores =[]\n for label_index, unlabel_index in sss.split(X, Y):\n i += 1\n X_train = X.iloc[label_index]\n Y_train = Y.iloc[label_index]\n X_test = X.iloc[unlabel_index]\n Y_test = Y.iloc[unlabel_index]\n\n Y_unlabel = copy.deepcopy(Y_test)\n Y_unlabel['Class'] = -1\n\n X_new = pd.concat([X_train, X_test])\n Y_new = pd.concat([Y_train, Y_unlabel])\n\n\n shuffle_index = np.random.permutation(X.index)\n X_new_shuffle = X_new.take(shuffle_index)\n Y_new_shuffle = Y_new.take(shuffle_index)\n\n lp = LabelPropagation(gamma=gamma, n_neighbors=n_neighbors, max_iter=max_iter)\n lp.fit(X_new_shuffle, Y_new_shuffle.values.ravel())\n\n Y_predict_train = lp.predict(X_train)\n Y_predict_test = lp.predict(X_test)\n train_scores.append(accuracy_score(Y_train, Y_predict_train))\n test_scores.append(accuracy_score(Y_test, Y_predict_test))\n # print(\"-------Cross_validation epoch {}--------\".format(i))\n # print(\"The accuracy in train set:\", 
accuracy_score(Y_train, Y_predict_train))\n # print(\"The accuracy in test set:\", accuracy_score(Y_test, Y_predict_test))\n mean_train_score = np.array(train_scores).mean()\n mean_test_score = np.array(test_scores).mean()\n print(\"For test size {}, the mean accuracy in train set is {}\".format(test_size, mean_train_score))\n print(\"For test size {}, the mean accuracy in test set is {}\".format(test_size, mean_test_score))\n train_socres_list.append(mean_train_score)\n test_scores_list.append(mean_test_score)\n\n\nif __name__ == '__main__':\n\n for ratio in np.linspace(0.1, 0.94, 20):\n semi_shuffle_estimator(test_size=ratio)\n plt.plot(testsize_list, train_socres_list)\n plt.plot(testsize_list, test_scores_list)\n plt.show()\n\n","repo_name":"guofei1989/ML_widgets","sub_path":"demos/semi_supervised/breast_cancer_semi_test.py","file_name":"breast_cancer_semi_test.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27294634068","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport cv2\nimport glob\nimport os\nimport zipfile as zf\nimport pickle\nfrom pathlib import Path\nimport random\nimport math\nimport scipy.ndimage\nimport scipy.io\nimport matplotlib.pyplot as plt\nfrom scipy import optimize as opt\nimport matplotlib.image as img\nfrom scipy.ndimage import median_filter\nimport sys\n\n# global variables:\nMIN_MATCH_COUNT = 24 # minimum number of matches required \nNUM_MATCH = 500 # total number of matches that we want to subsample from different locations of the image.\n # For every region in the image analysed, if it contains at least a descriptor, and \n # if the number of descriptors subsampled is fixed to N = 1, there will be 500 matches. \n # Nevertheless, some regions of the image can have no descriptors, so the total number of matches will be reduced. \n # For solving this, we can sample N > 1 descriptors from the regions of the image analysed \n\nMAX_ITER = 600 # number of iterations in ransac\nthreshold_error = 4 # threshold error for points to be considered inliers, when performing ransac\n\n\n# In[2]:\n\n\ndef match_features(des_src, des_dest, threshold):\n '''\n Implements the Nearest Neighbor Distance Ratio Test (NNDR) - Equation 4.18 in Section 4.1.3 of \n Szeliski - to assign matches between interest points in two images. It also searches for mutual \n matches and applies the NNDR test\n \n A match is between a feature in des_src and a feature in des_dest. We can\n represent this match as a the index of the feature in des_src and the index\n of the feature in des_dest\n \n :params:\n :des_src: an np array of features for interest points in source image\n :des_dest: an np array of features for interest points in destination image\n \n :returns:\n :matches: an np array of dimension k x 2 where k is the number of matches. 
The first\n column is an index into des_src and the second column is an index into des_dest\n '''\n \n global MIN_MATCH_COUNT\n\n matches = []\n \n # Re-normalize\n des_dest_normalize = des_dest / np.linalg.norm(des_dest, axis = 0)\n \n des_src_normalize = des_src / np.linalg.norm(des_src, axis = 0)\n \n # cosine similarity (descriptors are L2 normalized) \n matrix_similarity = des_src_normalize.T @ des_dest_normalize\n \n ind_col_matches = np.argmax(matrix_similarity, axis = 1)\n \n matches = np.concatenate((np.arange(0, des_src.shape[1]).reshape(-1,1), ind_col_matches.reshape(-1, 1)), axis = 1)\n final_matches = matches\n \n # FIND GOOD MATCHES:\n # Retrieve top 2 nearest neighbors 1->2.\n index_sorted = np.argsort(-matrix_similarity, axis = 1)[:, 0:2]\n\n matrix_distances = np.sqrt(2 - 2 * matrix_similarity)\n \n mask_good_matches = matrix_distances[list(range(0,matrix_distances.shape[0])), index_sorted[:, 0]] / matrix_distances[list(range(0,matrix_distances.shape[0])), index_sorted[:, 1]] < threshold\n \n if np.any(mask_good_matches):\n good_matches = matches[mask_good_matches, :]\n \n print(\"good matches/matches - %d/%d\" % (good_matches.shape[0],matches.shape[0]))\n \n if good_matches.shape[0] > MIN_MATCH_COUNT:\n final_matches = good_matches\n \n # FIND MUTUAL AND GOOD MATCHES: \n # Retrieve top 2 nearest neighbors 1->2.\n matches_12_top2 = np.argsort(-matrix_similarity, axis = 1)[:, 0:2]\n matches_12 = matches_12_top2[:, 0] # Save first NN and match similarity.\n \n matrix_distances = np.sqrt(2 - 2 * matrix_similarity)\n \n # Compute Lowe's ratio.\n mask1_good_matches = matrix_distances[list(range(0,matrix_distances.shape[0])), matches_12_top2[:, 0]] / matrix_distances[list(range(0,matrix_distances.shape[0])), matches_12_top2[:, 1]] < threshold\n\n # Retrieve top 2 nearest neighbors 1->2.\n matches_21_top2 = np.argsort(-matrix_similarity.T, axis = 1)[:, 0:2]\n matches_21 = matches_21_top2[:, 0] # Save first NN and match similarity.\n \n matrix_distances_T = np.sqrt(2 - 2 * matrix_similarity.T)\n \n # Compute Lowe's ratio.\n mask2_good_matches = matrix_distances_T[list(range(0,matrix_distances_T.shape[0])), matches_21_top2[:, 0]] / matrix_distances_T[list(range(0,matrix_distances_T.shape[0])), matches_21_top2[:, 1]] < threshold\n \n final_mask_good_matches = mask1_good_matches & mask2_good_matches[matches_12]\n \n # Mutual NN + symmetric ratio test.\n ids1 = np.arange(0, matrix_similarity.shape[0])\n \n mask_mutual_matches = (ids1 == matches_21[matches_12]) & final_mask_good_matches\n \n if np.any(mask_mutual_matches):\n mutual_matches = matches[mask_mutual_matches, :]\n \n if mutual_matches.shape[0] > MIN_MATCH_COUNT:\n final_matches = mutual_matches\n \n print(\"mutual and good matches/matches - %d/%d\" % (mutual_matches.shape[0],matches.shape[0]))\n\n return final_matches\n\n\n# In[3]:\n\n\ndef siftMatch(img1, img2, sift_path_ref, sift_path_image, threshold = 0.75, N = 1):\n \n global extract_sift, NUM_MATCH, subsampling\n \n if extract_sift:\n sift = cv2.SIFT_create()\n kp1, des1 = sift.detectAndCompute(img1,None)\n kp2, des2 = sift.detectAndCompute(img2,None)\n \n m =match_features(des1.T,des2.T, threshold)\n src_pts = np.float32([kp1[i].pt for i in m[:,0]]).reshape(-1,2)\n dst_pts = np.float32([kp2[i].pt for i in m[:,1]]).reshape(-1,2)\n \n else:\n data_ref = scipy.io.loadmat(sift_path_ref)\n dst = data_ref['p'] # (2,N) numpy array, where N is the total number of keypoints\n des_dest = data_ref['d'] # (128,N) numpy array, where N is the total number of keypoints\n \n 
data_image = scipy.io.loadmat(sift_path_image)\n src = data_image['p'] # (2,N) numpy array, where N is the total number of keypoints\n des_src = data_image['d'] # (128,N) numpy array, where N is the total number of keypoints\n \n if subsampling:\n h, w, _ = img1.shape\n \n h_subsampling = math.floor(h/4) \n \n w_subsampling = math.floor(w * h/(NUM_MATCH*h_subsampling)) \n \n regions_h = range(0, h+1, h_subsampling)\n regions_w = range(0, w+1, w_subsampling)\n \n des_src_subsampling = np.array([], dtype=np.int64).reshape(des_src.shape[0],0)\n src_subsampling = np.array([], dtype=np.int64).reshape(src.shape[0],0)\n \n id_descriptor = np.arange(des_src.shape[1])\n \n for i in range(len(regions_h)-1):\n h_region_min = regions_h[i]\n h_region_max = regions_h[i+1]-1\n \n for j in range(len(regions_w)-1):\n w_region_min = regions_w[j]\n w_region_max = regions_w[j+1]-1\n \n ind_keypoints_region = (src[0,:] > w_region_min) & (src[0,:] < w_region_max) & (src[1,:] > h_region_min) & (src[1,:] < h_region_max)\n \n if np.any(ind_keypoints_region):\n if len(ind_keypoints_region[ind_keypoints_region == True]) < N:\n num_sampling = len(ind_keypoints_region[ind_keypoints_region == True])\n \n else:\n num_sampling = N \n \n ind_d_des = random.sample(list(id_descriptor[ind_keypoints_region]), num_sampling)\n \n des_src_subsampling = np.concatenate((des_src_subsampling, des_src[:, ind_d_des]), axis = 1)\n src_subsampling = np.concatenate((src_subsampling, src[:, ind_d_des]), axis = 1)\n \n des_src = des_src_subsampling\n src = src_subsampling\n \n m =match_features(des_src,des_dest, threshold)\n \n matches_coords = np.concatenate((src[:, m[:,0]], dst[:, m[:, 1]]))\n \n src_pts = matches_coords[0:2, :].T\n dst_pts = matches_coords[2:4, :].T\n \n return src_pts, dst_pts\n\n\n# In[4]:\n\n\ndef FitHomography(selected_matches, N = 4):\n \"\"\" Compute the fitted homography matrix by using N match pairs\n \n [u] [X]\n [v] = H [Y], \n [1] [1]\n being H a 3x3 matrix \n \n This can be arranged in a system Ax = 0, where x is a column vector with \n the parameters of the homography, and A is given by:\n A = [X Y 1 0 0 0 -u.X -u.Y -u]\n [0 0 0 X Y 1 -v.X -v.Y -v]\n\n For N matches, the above matrix is vertically stacked, with 2 rows per match \n \"\"\"\n \n X = selected_matches[:,0]\n Y = selected_matches[:,1]\n u = selected_matches[:,2]\n v = selected_matches[:,3]\n \n A = []\n \n for i in range(N):\n row_1 = np.array([X[i], Y[i], 1, 0, 0, 0, -X[i]*u[i], -Y[i]*u[i], -u[i]])\n row_2 = np.array([0, 0, 0, X[i], Y[i], 1, -X[i]*v[i], -Y[i]*v[i], -v[i]])\n \n A.append(row_1)\n A.append(row_2)\n \n A = np.array(A)\n \n # V = eigvec(A.T @ A), being V.T obtained through Singular Value Decomposition (SVD)\n _, _, vT = np.linalg.svd(A)\n\n # vT is a 9×9 matrix\n # the solution x is the eigenvector corresponding to the smallest eigenvalue, \n # that is, the eigenvector corresponding to the minimum singular value, \n # leading to a row vector of 9 columns. Thus, to obtain the calibrated \n # homography H, the final solution is to reshape the obtained vector into a \n # 3x3 matrix \n H = np.reshape(vT[-1,:], (3,3))\n \n # normalized homography, dividing by the element at (3,3)\n H = H/H[2,2]\n \n return H \n\n\n# In[5]:\n\n\ndef get_errors(all_matches, H):\n \"\"\"Compute error or distance between original points and transformed by H. 
\n Return an array of errors for all points\"\"\"\n \n num_matches = len(all_matches)\n \n X = all_matches[:,0].reshape(-1, 1)\n Y = all_matches[:,1].reshape(-1, 1)\n u = all_matches[:,2].reshape(-1, 1)\n v = all_matches[:,3].reshape(-1, 1)\n \n # all matching points in source image\n all_p1 = np.concatenate((X, Y, np.ones((len(all_matches),1))), axis = 1)\n \n # all matching points in template image\n all_p2 = np.concatenate((u, v), axis = 1)\n \n # Transform every point in p1 to estimate p2\n estimate_p2homogeneous = H @ all_p1.T\n \n estimate_p2euclidean = (estimate_p2homogeneous/(estimate_p2homogeneous[-1]))[0:2]\n \n # Compute error of each matching pair\n errors = np.linalg.norm(all_p2 - estimate_p2euclidean.T, axis = 1) \n \n return errors\n\n\n# In[6]:\n\n\ndef GetHomographyRANSAC(match_coords):\n \n \"\"\"Function that computes linear (2D) Homography Calibration, implementing RANSAC\n for eliminating outliers and align correspondent matches. The main output concerns \n a single transformation H that gets the most inliers in the course of all the \n iterations. \n \n Args:\n match_coords(numpy.ndarray): In dims (#matched pixels, 4).\n\n Returns:\n H(numpy.ndarray): Homography matrix, dims (3, 3).\n \"\"\"\n \n global MAX_ITER, threshold_error\n \n N = 4 # four matches to initialize the homography in each iteration\n \n max_inliers = 0 \n \n # RANSAC procedure \n for itr in range(MAX_ITER): \n # Randomly select 4 matched pairs\n idx_rand_inliers = random.sample(range(match_coords.shape[0]), N)\n \n selected_matches = match_coords[idx_rand_inliers, :]\n \n # compute the homography H by DLT from the N = 4 matched pairs \n H = FitHomography(selected_matches)\n \n # Find inliners \n errors = get_errors(match_coords, H)\n \n idx_inliers = np.where(errors < threshold_error)[0]\n \n num_inliers = len(idx_inliers) \n \n # Analise current solution, and if it contains the maximum number of inliers\n # amongst all homographies until now fitted, save the current inliers for \n # further refinement of the homography in the last step \n \n if num_inliers > max_inliers:\n max_inliers = num_inliers\n best_inliers = match_coords[idx_inliers]\n \n # compute the homography H by DLT from best_inliers \n H = FitHomography(best_inliers, max_inliers)\n \n return H\n\n\n# In[7]:\n\n\ndef Check_Homography(image, H, image_paths, sift_paths, i):\n \"\"\"\n Check if homography is reasonable, according to certain criteria:\n - If the determinant of the homography det(H) is very close to 0, H is \n close to singular;\n \n - If condition number of H (ratio of the first-to-last singular value) is\n infinite, the matrix H is singular, and if it is too large, H is \n ill-conditioned. In non-mathematical terms, an ill-conditioned problem \n is one where, for a small change in the inputs, there is a large \n change in the output, that is, H is very sensitive to changes or errors \n in the input. This means that the correct solution/answer to the \n equation becomes hard to find;\n\n - If det(H) < 0, the homography is not conserving the orientation, \n being orientation-reversing. This is not suitable, except if we are \n watching the object in a mirror. Nevertheless, sift/surf descriptors \n are not done to be mirror invariant, so if it was the case we would \n probably not have good maches. \n \n An exactly singular matrix means that it is not invertible. If the above \n criteria is verified, more pratically the matrix H is non-invertible. 
\n In the context of homographies, it means that points in one 2D image are mapped\n to a less-than-2D subspace in the other image (a line, a point). A \n nearly singular matrix is indicative of a rather extreme warp. \n\n \"\"\"\n #Conditions to accertain that the resultant homography H is free of \n #singularities. If one of the condition is satisfied, the Homography H from \n #image space to reference image space is not reasonable, according \n #to the defined criteria \n \n global last_H_good, last_iter_good, extract_sift \n \n if np.linalg.det(H) <= 0.1 or np.linalg.cond(H[0:2, 0:2]) >= 3.25:\n \n # In the condition number, only the top-left 2x2 matrix is considered, \n # thus omitting the z-dependence of the transformation, which should be \n # irrelevant because we know that z will always be fixed to 1 on the input\n \n if last_iter_good != 0:\n print('searching for a reasonable H')\n # if in the current iteration no reasonable homography was estimated, \n # the last homography that was found reasonable is considered \n \n I2_path = image_paths[last_iter_good]\n I2 = img.imread(I2_path)\n \n if extract_sift:\n sift_path1 = None\n sift_path2 = None\n else: \n sift_path2 = sift_paths[last_iter_good]\n sift_path1 = sift_paths[i]\n \n # homography from I2 to template \n H_I2_template = last_H_good\n \n # match points between image (source space) and I2 (destination space)\n m_coords_img, m_coords_temp = siftMatch(image, I2, sift_path2, sift_path1, N = 4)\n match_coords = np.append(m_coords_img, m_coords_temp, axis = 1)\n \n # homography from image space to I2 space \n H_image_I2 = GetHomographyRANSAC(match_coords)\n \n # Conjugate all the performed transformations from original to final warped\n # image (image space--> H_image_I2 --> I2 space --> H_I2_template --> final warp image)\n H = H_I2_template@H_image_I2\n \n # check if obtained homography is reasonable \n if np.linalg.det(H) > 0.1 and np.linalg.cond(H[0:2, 0:2]) < 3.25:\n last_iter_good = i \n last_H_good = H\n \n # If the above strategy was not successful in estimating an homography between\n # image and template, set H to 0 \n else:\n H = np.zeros((3,3))\n \n # Homography H from image space to template space is reasonable, according \n # to the defined criteria \n else:\n last_iter_good = i \n last_H_good = H\n \n return H\n\n\n# In[8]:\n\n\ndef image_wrap(image, template, H):\n \n \"\"\"\n Function that returns image into template perspective according to homography H, \n by applying inverse mapping. 
\n \n \"\"\"\n \n # Coordinates of the template page corners\n height,width,ch = template.shape\n u_corners = np.array([0, width, width, 0])\n v_corners = np.array([height, height, 0, 0])\n \n max_x = math.ceil(np.ndarray.max(u_corners))\n max_y = math.ceil(np.ndarray.max(v_corners))\n min_x = math.floor(np.ndarray.min(u_corners))\n min_y = math.floor(np.ndarray.min(v_corners))\n \n # bounding box region of the warped image, created by a grid that has the \n # same size as the template\n x, y = np.meshgrid(range(min_x, max_x), range(min_y, max_y))\n \n # homogeneous coordinates in the grid\n x_coords = x.flatten().reshape(1,-1)\n y_coords = y.flatten().reshape(1,-1)\n \n grid_coords = np.concatenate((x_coords, y_coords, np.ones((1, x_coords.shape[1]))))\n \n # reverse warp by applying the inverse of our transformation matrix H\n warp_coords_homogeneous = np.linalg.solve(H, grid_coords)\n \n # To get the warped coordinates, we must divide the first and second coordinates by \n # z to obtain the new x and y (euclidean coordinates)\n z = warp_coords_homogeneous[None, 2, :] \n warp_coords = warp_coords_homogeneous[0:2, :]/np.concatenate((z,z))\n \n # Reshape the pixel grid to have the same size as the template \n x_warp = np.reshape(warp_coords[None, 0, :], (x.shape[0], x.shape[1]))\n y_warp = np.reshape(warp_coords[None, 1, :], (y.shape[0], y.shape[1]))\n \n # Warped Image array that will contain RGB color maps obtained through inverse \n # mapping \n I_WarpColorMaps = np.zeros((template.shape[0], template.shape[1], image.shape[2]))\n \n # color interpolation, by sampling a color value for each pixel in source image. \n # By doing this we won't have any black pixels or gaps in the warpped image, \n # (a kind of undersampling artifact)\n \n for i in range(image.shape[2]):\n # When mapping pixel locations, they most often will not fall exactly on a \n # pixel in the source image. 
For solving this, we use nearest interpolation (order = 0)\n # for assigning the color value\n \n I_WarpColorMaps[:, :, i] = scipy.ndimage.map_coordinates(image[:, :, i].astype(float), [y_warp, x_warp], order = 0)\n \n # color pixel warping, by converting I_WarpColorMaps into an unsigned 8-bit integer, \n # with the elements of an uint8 ranging from 0 to 255\n I = I_WarpColorMaps.astype('uint8')\n \n return I\n\n\n# In[9]:\n\n\ndef segmentation_skin(I_RGB, kernel_size = 3):\n \"\"\"\n Segmentation by partitioning of the colour space.\n \"\"\"\n\n # clean up with a median filter.\n I_RGB[:,:,0] = median_filter(I_RGB[:,:,0], size=kernel_size)\n I_RGB[:,:,1] = median_filter(I_RGB[:,:,1], size=kernel_size)\n I_RGB[:,:,2] = median_filter(I_RGB[:,:,2], size=kernel_size)\n \n \n # converting from rgb to hsv color space\n I_HSV = cv2.cvtColor(I_RGB,cv2.COLOR_BGR2HSV)\n H = I_HSV[:,:,0]\n S = I_HSV[:,:,1]\n V = I_HSV[:,:,2]\n \n #print(\"H:\", np.ndarray.max(H[1000:1500, 1500:]))\n #print(\"H:\", np.ndarray.min(H[1000:1500, 1500:]))\n #print(\"S:\", np.ndarray.max(S[1000:1500, 1500:]))\n #print(\"S:\", np.ndarray.min(S[1000:1500, 1500:]))\n #print(\"V:\", np.ndarray.max(V[1000:1500, 1500:]))\n #print(\"V:\", np.ndarray.min(V[1000:1500, 1500:]))\n \n # define the upper and lower boundaries of the HSV pixel\n # intensities to be considered 'skin'\n lower = np.array([101, 50, 200], dtype = \"uint8\")\n upper = np.array([120, 110, 255], dtype = \"uint8\")\n \n # determine the HSV pixel intensities that fall into\n # the speicifed upper and lower boundaries\n skinMask = cv2.inRange(I_HSV, lower, upper)\n \n # apply 2 iterations of erosions and a 4 iterations of dilations, respectively, to the mask\n # using an elliptical kernel, in order to remove small false-positive skin regions in the image. \n kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20)) # create an elliptical structuring kernel\n kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (100, 100))\n \n skinMask = cv2.erode(skinMask, kernel_erode, iterations = 2)\n\n skinMask = cv2.dilate(skinMask, kernel_dilate, iterations = 4)\n \n # apply the mask to the frame\n I_segmented = cv2.bitwise_and(I_RGB, I_RGB, mask = ~skinMask)\n \n return I_segmented, skinMask\n\n\n# In[16]:\n\n\ndef pivproject2022_task1_plus(path_to_template_folder, path_to_input_folder, path_to_output_folder, extract_sift, subsampling, cv2WarpPerspective, segment):\n \n \"\"\"\n Compute the homography between images in a directory and a template\n\n path_to_template_folder: string with the path to a folder with both the \n a jpg file for the template image\n and a mat file with the sift descriptors\n \n path_to_input_folder: string with the path to the input folder, where input images \n and keypoints are stored. Images are named rgb_number.jpg \n (or rgb_number.png) and corresponding keypoints are named \n rgbsift_number.mat\n\n path_to_output_folder: string with the path where homographies between images and \n the template are stored\n \"\"\"\n \n global last_H_good, last_iter_good\n \n # Check if path_to_input_folder was passed. If not, \"No_path\" is assigned\n if not('path_to_input_folder' in locals()):\n path_to_input_folder = \"No_path\";\n \n # Check if output directory exists. 
If not, the output directory is created \n    if not(os.path.isdir(path_to_output_folder)):\n        os.mkdir(path_to_output_folder)\n    \n    # Get input rgb images\n    rgb_paths = []\n    sift_paths = []\n\n    for im_path in glob.glob(path_to_input_folder+'/*.jpg'):\n        rgb_paths.append(im_path)\n    \n    if len(rgb_paths) == 0:\n        print('ERROR: In the specified path there are no input image files')\n        return \n    \n    else: \n        # Order the rgb_paths array so that consecutive frames follow \n        # each other \n        image_paths = sorted(rgb_paths) \n    \n    if not(extract_sift):\n        print('Searching for sift .mat files')\n        \n        for im_path in glob.glob(path_to_input_folder+'/*.mat'):\n            sift_paths.append(im_path)\n        \n        if len(sift_paths) != 0:\n            extract_sift = False\n            \n            sift_paths_ordered = sorted(sift_paths)\n        \n        else:\n            extract_sift = True\n            print('In the specified path there are no sift input files. Thus, a sift function will be used to extract matching points')\n    \n    # Get template image\n    try:\n        print('Searching for template')\n        \n        for path in glob.glob(path_to_template_folder+'/*.jpg'):\n            template_path = path\n            template = img.imread(template_path)\n        \n    except:\n        print('ERROR: In the specified path there is no template image, or the directory format is not compatible with OpenCV, as it only accepts ASCII characters for image paths')\n        return\n    \n    try:\n        sift_template_path = template_path[:-4] + '.mat'\n        \n    except:\n        print('ERROR: In the specified path there is no sift input file. Thus, a sift function will be used to extract matching points')\n    \n    \n    print('Calculating projections')\n    \n    last_image_no_skin = 0 # variable for saving the last warped image in which no skin was detected \n    last_H_good = 0 # variable for saving the last reasonable homography estimated \n    last_iter_good = 0 # variable for saving the last iteration where a reasonable \n                       # homography was estimated \n    \n    for i in range(len(image_paths)):\n        print(str(i + 1), '/', str(len(image_paths)), 'Image to be processed')\n        \n        image_path = image_paths[i]\n        image = img.imread(image_path)\n        \n        if extract_sift:\n            sift_path = None\n            \n        else:\n            sift_path = sift_paths_ordered[i]\n        \n        try:\n            # coordinates of the matches between image and template \n            m_coords_img, m_coords_temp = siftMatch(image, template, sift_template_path, sift_path, N = 4) \n            match_coords = np.append(m_coords_img, m_coords_temp, axis = 1)\n            \n        except:\n            print('ERROR: check the directory format, as OpenCV only accepts ASCII characters for image paths')\n            return \n        \n        try:\n            H = GetHomographyRANSAC(match_coords)\n            \n            if extract_sift:\n                sift_paths_ordered = None\n            \n            # Check if homography is reasonable, according to certain criteria\n            H = Check_Homography(image, H, image_paths, sift_paths_ordered, i)\n            \n        except: \n            print('ERROR: RANSAC failed to compute homography. 
Check if there are enough matching keypoints.')\n \n # extract file name for saving subsequenlty the desired outputs in the output folder \n file_name = os.path.split(image_path)[1]\n \n # if the homography is reasonable, perform image warping and segmentation (this last one if set to true) \n if not(np.array_equal(H, np.zeros((3,3)))):\n\n if cv2WarpPerspective:\n I = cv2.warpPerspective(image, H, (template.shape[1], template.shape[0]), flags = cv2.INTER_NEAREST)\n \n else:\n I = image_wrap(image, template, H)\n \n if segment:\n I_segment, skinMask = segmentation_skin(I)\n \n if np.any(skinMask) & np.any(last_image_no_skin) != 0:\n masked_image = cv2.bitwise_and(last_image_no_skin, last_image_no_skin, mask = skinMask)\n \n I_final = (I_segment + masked_image).astype('uint8')\n \n # saving the segmented images \n image_segmented_output_path = path_to_output_folder + '/segmented_' + file_name\n cv2.imwrite(image_segmented_output_path, I_final)\n \n plt.subplot(133)\n plt.title('Segmented Image')\n plt.imshow(I_final)\n plt.axis('off')\n \n elif ~np.any(skinMask):\n last_image_no_skin = I \n \n # if the homography isnt reasonable, the final rendered image is set to zero\n else:\n I = np.zeros((template.shape[0], template.shape[1]))\n \n plt.subplot(131)\n plt.title('original image')\n plt.imshow(image)\n plt.axis('off')\n plt.subplot(132)\n plt.title('Warped Image')\n plt.imshow(I)\n plt.axis('off')\n plt.show()\n \n # Saving outputs \n H_output_path = path_to_output_folder + '/' + 'H_' + file_name[4:8] + '.mat'\n scipy.io.savemat(H_output_path, {'H':H})\n \n # saving the rendered images in the template perspective \n image_output_path = path_to_output_folder + '/' + file_name\n cv2.imwrite(image_output_path, I)\n \n print('All projections calculated.')\n\n\n# In[13]:\n\n\npath_to_template_folder = sys.argv[1]\npath_to_input_folder = sys.argv[2]\npath_to_output_folder = sys.argv[3]\n\nextract_sift = int(sys.argv[4]) # variable that defines if the extraction of sift keypoints and descriptors is to \n # be performed (set to 1, 0 otherwise)\n\nsubsampling = int(sys.argv[5]) # variable that defines if subsampling of the source image descriptors is to \n # be performed (set to 1, 0 otherwise), when performing the matching\n \ncv2WarpPerspective = int(sys.argv[6]) # variable that defines if image warping is to be performed by \n # cv2WarpPerspective built-in function (set to 1) or a function \n # developed by the group (set to 0) \n\nsegment = int(sys.argv[7]) # variable that defines if skin segmentation is to be performed (set to 1, 0 \n # otherwise) \n\n\npivproject2022_task1_plus(path_to_template_folder, path_to_input_folder, path_to_output_folder, extract_sift, subsampling, cv2WarpPerspective, segment)\n\n","repo_name":"marianamourao-37/Image-Processing-and-Vision","sub_path":"tablet_mode/pivproject2022_task1_plus.py","file_name":"pivproject2022_task1_plus.py","file_ext":"py","file_size_in_byte":28943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43627437210","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@Time : 2023/1/6 15:12\r\n@Author : Haozhao Ma\r\n@Email : haozhaoma@mail.nwpu.edu.cn\r\n@time: 2023/1/6 15:12\r\n\"\"\"\r\nimport os\r\nimport os.path as osp\r\n\r\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\r\n\r\nimport torch\r\nimport numpy as np\r\nimport argparse\r\nimport sys\r\nsys.path.append(\"/mnt/cyang_text/solotext\")\r\nimport time\r\nimport 
json\r\nfrom mmcv import Config\r\n\r\nfrom dataset import build_dataset\r\nfrom models import build_model\r\nfrom models.utils import fuse_module\r\nfrom utils import ResultFormat, AverageMeter\r\n\r\nfrom ctw_eval_all import compute_prh as ctw_prh\r\nfrom msra_eval_all import compute_prh as msra_prh\r\n\r\n\r\ndef test(test_loader, model, cfg):\r\n res = {'less': [], 'more': [], 'both': []}\r\n print('Testing with pretrained model')\r\n contours_count = []\r\n contains = []\r\n for idx, data in enumerate(test_loader):\r\n sys.stdout.flush()\r\n # prepare input\r\n data['imgs'] = data['imgs'].cuda()\r\n instance = data['gt_instance'][0]\r\n with torch.no_grad():\r\n HW, con, conta = model.getHW(**data)\r\n contours_count.extend(con)\r\n contains.extend(conta)\r\n if HW is None:\r\n if len(torch.unique(instance)) == 1:\r\n continue\r\n res['less'].append(data['imgs_meta']['img_path'][0].split('/')[-1])\r\n else:\r\n if len(torch.unique(instance)) == 1:\r\n res['more'].append(data['imgs_meta']['img_path'][0].split('/')[-1])\r\n else:\r\n a = np.zeros(len(torch.unique(instance)) - 1)\r\n b = np.zeros(len(HW))\r\n for u_index, u in enumerate(torch.unique(instance)[1:]):\r\n ins = (instance == u).int()\r\n for index, pos in enumerate(HW):\r\n y, x = pos\r\n if b[index] == 0 and ins[int(y), int(x)] == 1:\r\n b[index] = 1\r\n a[u_index] = 1\r\n\r\n sum_a = np.sum(a)\r\n sum_b = np.sum(b)\r\n if sum_a == len(a) and sum_b != len(b):\r\n # 预测中心点多了,实例之外还存在点\r\n res['more'].append(data['imgs_meta']['img_path'][0].split('/')[-1])\r\n elif sum_a != len(a) and sum_b == len(b):\r\n # 预测中心点少了,有的实例没有覆盖\r\n res['less'].append(data['imgs_meta']['img_path'][0].split('/')[-1])\r\n elif sum_a != len(a) and sum_b != len(b):\r\n # 两种情况都存在\r\n res['both'].append(data['imgs_meta']['img_path'][0].split('/')[-1])\r\n\r\n import pandas as pd\r\n\r\n data = np.concatenate([np.array(contours_count)[:, np.newaxis], np.array(contains)[:, np.newaxis]], axis=1)\r\n df = pd.DataFrame(data=data)\r\n df.to_csv('./contours_contains.csv')\r\n\r\n df2 = pd.DataFrame(res['less'])\r\n df3 = pd.DataFrame(res['more'])\r\n df4 = pd.DataFrame(res['both'])\r\n writer = pd.ExcelWriter('center.xlsx')\r\n df2.to_excel(excel_writer=writer, sheet_name='less')\r\n df3.to_excel(excel_writer=writer, sheet_name='more')\r\n df4.to_excel(excel_writer=writer, sheet_name='both')\r\n writer.save()\r\n\r\n\r\ndef main(checkpoint_path, test_loader, cfg):\r\n sys.stdout.flush()\r\n model = build_model(cfg.model)\r\n model = model.cuda()\r\n model.eval()\r\n if osp.isfile(checkpoint_path):\r\n print(\"Loading model and optimizer from checkpoint '{}'\".format(checkpoint_path))\r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['state_dict'])\r\n model = fuse_module(model)\r\n test(test_loader, model, cfg)\r\n\r\n\r\nif __name__ == '__main__':\r\n from config import r18_msra\r\n import warnings\r\n\r\n warnings.filterwarnings('ignore')\r\n\r\n cfg_dict = {'msra': r18_msra}\r\n cfg = cfg_dict['msra']\r\n cfg.data_type = 'MSRA'\r\n cfg.test_cfg.result_path = cfg.result_dir\r\n\r\n cfg.model.backbone.pretrained = False # 不加载resnet的预训练权重,避免浪费时间\r\n cfg.report_speed = False # 是否打印测速度\r\n\r\n # data loader\r\n data_loader = build_dataset(cfg.data.test)\r\n test_loader = torch.utils.data.DataLoader(\r\n data_loader,\r\n batch_size=1,\r\n shuffle=False,\r\n num_workers=0,\r\n )\r\n\r\n epoch = 564\r\n print('▊' * 30 + ' Current Testing Epoch :' + str(epoch) + ' ' + '▊' * 30)\r\n\r\n checkpoint_path = 
osp.join(cfg.checkpoint_dir+'twelfth', 'checkpoint_' + str(epoch) + 'ep.pth.tar')\r\n main(checkpoint_path, test_loader, cfg)\r\n","repo_name":"omtcyang/Text-Pass-filter","sub_path":"utils/center_instance.py","file_name":"center_instance.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10951124814","text":"import abc\nfrom requests import Session, Response\nfrom serde import from_dict\nfrom typing import Union\nfrom chris.helpers.deserialize import deserialize\n\n\nclass ConnectedResource(abc.ABC):\n \"\"\"\n A base class for resource models which have a session object which is\n used to make requests to CUBE. The session should have an authorization\n token for CUBE.\n\n ## Protocol\n\n Classes which extend `ConnectedResource` **must** be decorated with\n [`@serde.deserialize`](https://yukinarit.github.io/pyserde/api/serde/de.html#deserialize).\n Since `ConnectedResource` itself is not a\n [dataclass](https://docs.python.org/3/library/dataclasses.html),\n its `session` field are not part of the subclass's `__init__`,\n and are not deserialized by _pyserde_.\n After calling `serde.json.from_json`, the program **must** assign a value\n to the deserialized object's `session` field.\n\n ```python\n @serde.deserialize()\n class Thing(ConnectedResource):\n id: int\n\n def get_thing() -> Thing:\n res = session.get(url)\n return Thing.deserialize(res, session)\n ```\n\n ## Visibility\n\n This class and its fields are private-ish. They may be used inside the\n `chris` module, but they should not be used by clients.\n (`protected` in Java-speak, `pub(crate)` in Rust-speak).\n \"\"\"\n\n session: Session\n\n @classmethod\n def deserialize(cls, data: Union[Response, dict], session: Session):\n if isinstance(data, Response):\n o: cls = deserialize(cls, data)\n elif isinstance(data, dict):\n o: cls = from_dict(cls, data)\n else:\n raise TypeError(f\"data type {type(data)} is not Union[Response, dict]\")\n object.__setattr__(o, \"session\", session)\n return o\n","repo_name":"FNNDSC/caw","sub_path":"chris/helpers/connected_resource.py","file_name":"connected_resource.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71321999734","text":"import datetime\nimport unittest\nfrom typing import List\n\nfrom irctk.message import Message, MessageTag\nfrom irctk.nick import Nick\nfrom tests.mock_client import MockClient as Client\n\n\nclass ClientTests(unittest.TestCase):\n def setUp(self) -> None:\n self.client = Client('kylef', 'kyle', 'Kyle Fuller')\n self.client.nick.nick = 'kylef'\n self.client.delegate = self\n\n self.private_messages: List = []\n self.channel_messages: List = []\n\n # Delegate\n\n def irc_private_message(self, client, nick, message):\n self.private_messages.append((client, nick, message))\n\n def irc_channel_message(self, client, nick, channel, message):\n self.channel_messages.append((client, nick, channel, message))\n\n def test_set_delegate(self) -> None:\n self.client.delegate = self\n self.assertEqual(self.client.delegate, self)\n\n # Tests\n\n def test_client_has_nickname(self) -> None:\n self.assertEqual(self.client.nickname, 'kylef')\n\n def test_client_has_ident(self) -> None:\n self.assertEqual(self.client.ident, 'kyle')\n\n def test_client_has_realname(self) -> None:\n self.assertEqual(self.client.realname, 'Kyle Fuller')\n\n # Registration\n\n def 
test_client_is_not_registered_by_default(self) -> None:\n self.assertFalse(self.client.is_registered)\n\n def test_client_is_registered_after_001(self) -> None:\n self.client.process_line(':irc.kylefuller.co.uk 001 kyle :Welcome')\n self.assertTrue(self.client.is_registered)\n\n def test_client_takes_nick_from_001(self) -> None:\n self.client.process_line(':irc.kylefuller.co.uk 001 kyle5 :Welcome')\n self.assertEqual(self.client.nick.nick, 'kyle5')\n\n def test_client_ignores_message_tags(self) -> None:\n self.client.process_line(\n '@time=bar;foo=x :irc.kylefuller.co.uk 001 kyle :Welcome'\n )\n self.assertTrue(self.client.is_registered)\n\n # Ping\n\n def test_client_sends_pong_when_pinged(self) -> None:\n self.client.process_line('PING :hello')\n self.assertEqual(self.client.sent_lines, ['PONG hello'])\n\n # Nick Change\n\n def test_clients_handles_nick_change(self) -> None:\n self.client.process_line(':irc.example.com 001 kyle :Welcome')\n self.client.process_line(':kyle!kyle@cocode.org NICK kyle2')\n self.assertEqual(self.client.nick.nick, 'kyle2')\n\n def test_clients_handles_nick_change_case_insensitive(self) -> None:\n self.client.process_line(':irc.example.com 001 kyle :Welcome')\n self.client.process_line(':KYLE!kyle@cocode.org NICK kyle2')\n self.assertEqual(self.client.nick.nick, 'kyle2')\n\n # Handling\n\n def test_client_handles_5_parsing_support(self) -> None:\n self.client.process_line(\n ':irc.kylefuller.co.uk 005 kyle :NICKLEN=5 CHANNELLEN=6'\n )\n self.assertEqual(self.client.isupport.maximum_nick_length, 5)\n self.assertEqual(self.client.isupport.maximum_channel_length, 6)\n\n def test_client_handles_joining_channel(self) -> None:\n self.client.process_line(':kylef!kyle@kyle JOIN #test')\n\n channel = self.client.channels[0]\n self.assertEqual(channel.name, '#test')\n self.assertEqual(channel.members[0].nick.nick, self.client.nick.nick)\n self.assertTrue(channel.is_attached)\n\n def test_client_handles_parting_channel(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kylef!kyle@kyle JOIN #test')\n self.client.process_line(':kylef!kyle@kyle PART #test :goodbye')\n self.assertEqual(channel.members, [])\n self.assertFalse(channel.is_attached)\n\n def test_client_handles_parting_channel_without_reason(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kylef!kyle@kyle JOIN #test')\n self.client.process_line(':kylef!kyle@kyle PART #test')\n self.assertEqual(channel.members, [])\n\n def test_client_handles_quit_removing_from_channel(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kylef!kyle@kyle JOIN #test')\n\n self.client.process_line(':doe!kyle@kyle JOIN #test')\n self.assertEqual(len(channel.members), 2)\n\n self.client.process_line(':doe!kyle@kyle QUIT :goodbye')\n self.assertEqual(len(channel.members), 1)\n\n def test_client_handles_getting_kicked_from_channel(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kylef!kyle@kyle JOIN #test')\n self.client.process_line(':kylef!kyle@kyle KICK #test kylef :goodbye')\n self.assertEqual(channel.members, [])\n\n def test_client_handles_channel_new_mode(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle MODE #test +tn')\n self.assertTrue(channel.modes['t'])\n self.assertTrue(channel.modes['n'])\n\n def test_client_handles_channel_remove_mode(self) -> None:\n channel = self.client.add_channel('#test')\n 
self.client.process_line(':kyle!kyle@kyle MODE #test +tn')\n self.client.process_line(':kyle!kyle@kyle MODE #test -tn')\n self.assertEqual(channel.modes, {})\n\n def test_client_handles_setting_channel_list_mode(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle MODE #test +b cake')\n self.client.process_line(':kyle!kyle@kyle MODE #test +b snake')\n self.assertEqual(channel.modes['b'], ['cake', 'snake'])\n\n def test_client_handles_removing_channel_list_mode(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle MODE #test +b cake')\n self.client.process_line(':kyle!kyle@kyle MODE #test +b snake')\n self.client.process_line(':kyle!kyle@kyle MODE #test -b cake')\n self.assertEqual(channel.modes['b'], ['snake'])\n\n def test_client_handles_removing_channel_list_mode2(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle MODE #test +l 5')\n self.client.process_line(':kyle!kyle@kyle MODE #test +l 6')\n self.assertEqual(channel.modes['l'], '6')\n\n def test_client_handles_324_mode(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':server 324 kylef #test +nt')\n self.assertEqual(channel.modes, {'n': True, 't': True})\n\n def test_client_handles_329_creation_date(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':server 329 kylef #test 1358579621')\n self.assertEqual(\n channel.creation_date, datetime.datetime(2013, 1, 19, 7, 13, 41)\n )\n\n def test_client_handles_332_topic(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':server 332 kylef #test :My Awesome Topic')\n self.assertEqual(channel.topic, 'My Awesome Topic')\n\n def test_client_handles_333_topic(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':server 333 kylef #test james!james@james 1395663680')\n self.assertEqual(channel.topic_owner, 'james!james@james')\n self.assertEqual(channel.topic_date, datetime.datetime(2014, 3, 24, 12, 21, 20))\n\n def test_client_handles_352(self) -> None:\n self.client.process_line(\n ':server 352 kylef * ~doe example.com irc-eu-1.darkscience.net kylef Hs :0 irctk'\n )\n self.assertEqual(self.client.nick.ident, '~doe')\n self.assertEqual(self.client.nick.host, 'example.com')\n\n def test_client_handles_353_names(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(\n ':server 353 kylef = #test :Derecho!der@der +Tempest!tmp@tmp dijit +other'\n )\n self.assertEqual(len(channel.members), 4)\n self.assertEqual(channel.members[0].nick, Nick.parse('Derecho!der@der'))\n self.assertEqual(channel.members[1].nick, Nick.parse('Tempest!tmp@tmp'))\n self.assertEqual(channel.members[2].nick, Nick(nick='dijit'))\n self.assertEqual(channel.members[3].nick, Nick(nick='other'))\n self.assertTrue(channel.members[1].has_mode('v'))\n self.assertTrue(channel.members[3].has_mode('v'))\n\n def test_client_updates_to_channel_topic(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle TOPIC #test :Hello World')\n self.assertEqual(channel.topic, 'Hello World')\n\n assert isinstance(channel.topic_owner, Nick)\n self.assertEqual(channel.topic_owner.nick, 'kyle')\n\n def test_client_updates_channel_membership_during_nick_change(self) -> None:\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle JOIN #test')\n 
self.client.process_line(':kyle!kyle@kyle NICK kyle2')\n\n self.assertEqual(channel.members[0].nick.nick, 'kyle2')\n\n def test_client_updates_channel_membership_during_nick_change_case_insensitive(\n self,\n ):\n channel = self.client.add_channel('#test')\n self.client.process_line(':kyle!kyle@kyle JOIN #test')\n self.client.process_line(':KYLE!kyle@kyle NICK kyle2')\n\n self.assertEqual(channel.members[0].nick.nick, 'kyle2')\n\n # Capabilities\n\n def test_client_asks_for_server_capabilities_on_connection(self) -> None:\n self.client.authenticate()\n self.assertEqual(self.client.sent_lines[0], 'CAP LS')\n\n def test_client_ends_capabilities_negotiation_after_no_caps(self) -> None:\n self.client.authenticate()\n self.client.sent_lines = [] # reset, we dont care about auth stuff\n self.client.process_line(':barjavel.freenode.net CAP * LS :unknown-capability')\n self.assertEqual(self.client.sent_lines, ['CAP END'])\n\n def test_client_requests_multi_prefix_capability(self) -> None:\n self.client.authenticate()\n self.client.sent_lines = [] # reset, we dont care about auth stuff\n self.client.process_line(':barjavel.freenode.net CAP * LS :multi-prefix')\n self.assertEqual(self.client.sent_lines, ['CAP REQ multi-prefix'])\n self.client.sent_lines = []\n self.client.process_line(':barjavel.freenode.net CAP * ACK :multi-prefix')\n self.assertEqual(self.client.sent_lines, ['CAP END'])\n self.assertEqual(self.client.cap_accepted, ['multi-prefix'])\n\n def test_client_requests_multi_prefix_capability_and_handles_rejection(\n self,\n ) -> None:\n self.client.authenticate()\n self.client.sent_lines = [] # reset, we dont care about auth stuff\n self.client.process_line(':barjavel.freenode.net CAP * LS :multi-prefix')\n self.assertEqual(self.client.sent_lines, ['CAP REQ multi-prefix'])\n self.client.sent_lines = []\n self.client.process_line(':barjavel.freenode.net CAP * NAK :multi-prefix')\n self.assertEqual(self.client.sent_lines, ['CAP END'])\n self.assertEqual(self.client.cap_accepted, [])\n\n # Perform\n\n def test_client_perform_on_connect(self) -> None:\n self.client.authenticate()\n\n self.assertEqual(\n self.client.sent_lines,\n ['CAP LS', 'NICK kylef', 'USER kyle 0 * :Kyle Fuller'],\n )\n\n def test_client_perform_on_connect_with_password(self) -> None:\n self.client.password = 'sekret'\n self.client.authenticate()\n\n self.assertEqual(\n self.client.sent_lines,\n ['CAP LS', 'PASS sekret', 'NICK kylef', 'USER kyle 0 * :Kyle Fuller'],\n )\n\n # Delegate\n\n def test_client_forwards_private_messages_to_delegate(self) -> None:\n self.client.process_line(':bob!b@irc.kylefuller.co.uk PRIVMSG kylef :Hey')\n self.assertEqual(\n self.private_messages,\n [(self.client, Nick.parse('bob!b@irc.kylefuller.co.uk'), 'Hey')],\n )\n\n def test_client_forwards_channel_messages_to_delegate(self) -> None:\n self.client.process_line(':kylef!b@irc.kylefuller.co.uk JOIN #example')\n self.client.process_line(':bob!b@irc.kylefuller.co.uk PRIVMSG #example :Hey')\n\n self.assertEqual(len(self.channel_messages), 1)\n self.assertEqual(self.channel_messages[0][1].nick, 'bob')\n self.assertEqual(self.channel_messages[0][2].name, '#example')\n self.assertEqual(self.channel_messages[0][3], 'Hey')\n\n # Sending\n\n def test_client_send_message(self) -> None:\n message = Message(command='PRIVMSG', parameters=['kyle', 'Hello World'])\n self.client.send(message)\n\n self.assertEqual(self.client.sent_lines, ['PRIVMSG kyle :Hello World'])\n\n def test_client_send_message_bad_args(self) -> None:\n message = 
Message(command='PRIVMSG', parameters=['kyle', 'Hello World'])\n\n with self.assertRaises(TypeError):\n self.client.send(message, 'x')\n\n def test_client_send_privmsg(self) -> None:\n self.client.send_privmsg('kyle', 'Hello')\n self.assertEqual(self.client.sent_lines, ['PRIVMSG kyle :Hello'])\n\n def test_client_send_join(self) -> None:\n self.client.send_join('#palaver')\n self.assertEqual(self.client.sent_lines, ['JOIN #palaver'])\n\n def test_client_send_join_with_key(self) -> None:\n self.client.send_join('#palaver', 'secret')\n self.assertEqual(self.client.sent_lines, ['JOIN #palaver secret'])\n\n def test_client_send_part(self) -> None:\n self.client.send_part('#palaver')\n self.assertEqual(self.client.sent_lines, ['PART #palaver'])\n\n def test_client_send_label_message(self) -> None:\n message = Message(command='PING', parameters=['localhost'])\n message.tags.append(MessageTag(name='label', value='xx'))\n future = self.client.send(message)\n self.assertFalse(future.done())\n\n self.assertEqual(self.client.sent_lines, ['@label=xx PING localhost'])\n\n self.client.process_line('@label=xx PONG localhost')\n self.assertTrue(future.done())\n self.assertEqual(str(future.result()), '@label=xx PONG localhost')\n\n def test_client_send_label_message_ack(self) -> None:\n message = Message(command='PONG', parameters=['localhost'])\n message.tags.append(MessageTag(name='label', value='xx'))\n future = self.client.send(message)\n self.assertFalse(future.done())\n\n self.assertEqual(self.client.sent_lines, ['@label=xx PONG localhost'])\n\n self.client.process_line('@label=xx :irc.example.com ACK')\n self.assertTrue(future.done())\n self.assertEqual(str(future.result()), '@label=xx :irc.example.com ACK')\n\n def test_client_send_label_message_batch(self) -> None:\n message = Message(command='WHOIS', parameters=['kyle'])\n message.tags.append(MessageTag(name='label', value='mGhe5V7RTV'))\n future = self.client.send(message)\n\n self.assertEqual(self.client.sent_lines, ['@label=mGhe5V7RTV WHOIS kyle'])\n\n self.client.process_line(\n '@label=mGhe5V7RTV :irc.example.com BATCH +NMzYSq45x labeled-response'\n )\n self.client.process_line(\n '@batch=NMzYSq45x :irc.example.com 311 client nick ~ident host * :Name'\n )\n self.client.process_line(\n '@batch=NMzYSq45x :irc.example.com 318 client nick :End of /WHOIS list.'\n )\n self.assertFalse(future.done())\n\n self.client.process_line(':irc.example.com BATCH -NMzYSq45x')\n self.assertTrue(future.done())\n self.assertEqual(\n [str(m) for m in future.result()],\n [\n '@label=mGhe5V7RTV :irc.example.com BATCH +NMzYSq45x labeled-response',\n '@batch=NMzYSq45x :irc.example.com 311 client nick ~ident host * Name',\n '@batch=NMzYSq45x :irc.example.com 318 client nick :End of /WHOIS list.',\n ':irc.example.com BATCH -NMzYSq45x',\n ],\n )\n\n def test_client_send_nick(self) -> None:\n message = Message(command='NICK', parameters=['newnick'])\n future = self.client.send(message)\n\n self.assertFalse(future.done())\n self.assertEqual(self.client.sent_lines, ['NICK newnick'])\n\n self.client.process_line(':kylef NICK newnick')\n self.assertTrue(future.done())\n self.assertEqual(str(future.result()), ':kylef NICK newnick')\n\n def test_client_send_nick_no_nickname_given(self) -> None:\n message = Message(command='NICK', parameters=[])\n future = self.client.send(message)\n\n self.assertFalse(future.done())\n self.assertEqual(self.client.sent_lines, ['NICK'])\n\n self.client.process_line(':example.com 431 kylef :No nickname given')\n self.assertTrue(future.done())\n\n 
def test_client_send_nick_erroneus_nickname(self) -> None:\n message = Message(command='NICK', parameters=['doe'])\n future = self.client.send(message)\n\n self.assertFalse(future.done())\n self.assertEqual(self.client.sent_lines, ['NICK doe'])\n\n self.client.process_line(':example.com 432 kylef doe :Erroneus nickname')\n self.assertTrue(future.done())\n\n def test_client_send_nick_nickname_in_use(self) -> None:\n message = Message(command='NICK', parameters=['doe'])\n future = self.client.send(message)\n\n self.assertFalse(future.done())\n self.assertEqual(self.client.sent_lines, ['NICK doe'])\n\n self.client.process_line(\n ':example.com 433 kylef doe :Nickname is already in use'\n )\n self.assertTrue(future.done())\n\n def test_client_send_nick_nick_collision(self) -> None:\n message = Message(command='NICK', parameters=['doe'])\n future = self.client.send(message)\n\n self.assertFalse(future.done())\n self.assertEqual(self.client.sent_lines, ['NICK doe'])\n\n self.client.process_line(':example.com 436 kylef doe :Nickname collision KILL')\n self.assertTrue(future.done())\n\n def test_client_send_nick_complete_registration(self) -> None:\n message = Message(command='NICK', parameters=['doe'])\n future = self.client.send(message)\n\n self.assertFalse(future.done())\n self.assertEqual(self.client.sent_lines, ['NICK doe'])\n\n self.client.process_line(':irc.example.com 001 doe :Welcome')\n self.assertTrue(future.done())\n","repo_name":"kylef/irctk","sub_path":"tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":18487,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"24464027945","text":"import jwt.utils\nimport time\nimport math\nimport requests\nimport uuid\nimport http.client\n\n \nclass InstaCart:\n \n \n def findStores(addressLine1, postalCode):\n \n conn = http.client.HTTPSConnection(\"connect.instacart.com\")\n \n payload = \"{\\n \\\"find_by\\\": {\\n \\\"address_line_1\\\": \\\"\" + addressLine1 + \"\\\",\\n \\\"postal_code\\\": \\\"\" + postalCode + \"\\\"\\n }\\n}\"\n\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"Bearer \"\n }\n\n conn.request(\"POST\", \"/v2/fulfillment/stores/delivery\", payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n\n print(data.decode(\"utf-8\"))\n \n return data\n \n def createConnectUser(user_id, first_name, last_name):\n conn = http.client.HTTPSConnection(\"connect.instacart.com\")\n\n payload = \"{\\n \\\"user_id\\\": \\\"\" + user_id + \"\\\",\\n \\\"first_name\\\": \\\"\" + first_name + \"\\\",\\n \\\"last_name\\\": \\\"\" + last_name + \"\\\"\\n}\"\n\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"Bearer \"\n }\n\n conn.request(\"POST\", \"/v2/fulfillment/users\", payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n\n print(data.decode(\"utf-8\"))\n \n return data\n \n # \\\"line_num\\\": \\\"string\\\",\\n \\\"count\\\": 1,\\n \\\"weight\\\": 1,\\n \\\"special_instructions\\\": \\\"string\\\",\\n \\\"replacement_policy\\\": \\\"no_replacements\\\",\\n \\\"replacement_items\\\": [\\n {\\n \\\"upc\\\": \\\"string\\\"\\n }\\n ],\\n \\\"item\\\": {\\n \\\"upc\\\": \\\"string\\\"\\n }\\n }\\n\n def itemsArrayToString(items):\n itemS = \"\\\"items\\\": [\\n\"\n for i in range (0,len(items)-1):\n itemS = itemS + \" {\\n \\\"line_num\\\": \\\"{items[i].lineNum}\\\",\\n \"\n itemS = itemS + \"\\\"count\\\": 
\\\"{items[i].count}\\\",\\n \"\n itemS = itemS + \"\\\"item\\\": {\\n \\\"upc\\\": \\\"{items[i].upc}\\\"\\n }\\n }\"\n if(i\"\n }\n\n conn.request(\"POST\", \"/v2/fulfillment/users/{user_id}/orders/delivery\", payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n\n print(data.decode(\"utf-8\"))\n \n return data\n\n def findStoresPickup(postalCode):\n conn = http.client.HTTPSConnection(\"connect.instacart.com\")\n \n payload = \"{\\n \\\"find_by\\\": {\\n \\\"postal_code\\\": \\\"\" + postalCode + \"\\\"\\n }\\n}\"\n #Might need this later: #\\n \\\"address_line_1\\\": \\\"\" + addressLine1 + \"\\\",\n\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"Bearer \"\n }\n\n conn.request(\"POST\", \"/v2/fulfillment/stores/delivery\", payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n\n print(data.decode(\"utf-8\"))\n \n return data\n ","repo_name":"sarpuser/Grocer.io","sub_path":"server/src/instacart_api.py","file_name":"instacart_api.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"71480947253","text":"# _ \n# (_)\n# _ __ _ _ ______ _ __ _ _ \n# | '_ \\| | | |_ / _` |/ _` | |\n# | |_) | |_| |/ / (_| | (_| | |\n# | .__/ \\__, /___\\__,_|\\__, |_|\n# | | __/ | __/ | \n# |_| |___/ |___/ \n\n\n__title__ = \"pyzagi\"\n__description__ = \"PYZAGI is a handler for Bizagi BPM ODATA API.\"\n__url__ = \"https://github.com/bogpok/pyzagi\"\n__version__ = \"0.0.6\"\n__author__ = \"Bogdan Pokrepin\"\n__author_email__ = \"pokrepin@google.com\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright Bogdan Pokrepin\"\n\n\n\n\n","repo_name":"bogpok/pyzagi","sub_path":"src/pyzagi/__version__.py","file_name":"__version__.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31190743987","text":"#! /usr/bin/env python\n\nimport logging\nimport sys\n\nfrom stats_app.ticker_store import TickerStore, MiningHistoryStore, TradeStore, GPUStore, MinerPoolStore, MinerSummaryStore\nfrom stats_app.settings import API_URL, POOLS, TRADE_API_URL, MINER_IP, MINER_PORT\n\nfrom cgminer import CGMiner\n\nlog = logging.getLogger(__name__)\n\n\ndef active_pool():\n miner = CGMiner(MINER_IP, MINER_PORT)\n r = miner.command('pools')\n pools = r.dict()['POOLS']\n active = next((p for p in pools if p['Stratum Active'] is True), None)\n active_pool = POOLS[active['URL']]\n return active_pool\n\n\nif __name__ == \"__main__\":\n try:\n mstore = MiningHistoryStore(active_pool())\n mstore.save()\n except:\n e = sys.exc_info()[0]\n log.error(\"Unable to save MiningHistory. %s\" % e)\n raise\n\n try:\n tstore = TickerStore(API_URL)\n tstore.save()\n except:\n e = sys.exc_info()[0]\n log.error(\"Unable to save Ticker. %s\" % e)\n\n try:\n tradestore = TradeStore(TRADE_API_URL)\n tradestore.save()\n except:\n e = sys.exc_info()[0]\n log.error(\"Unable to save Trade. %s\" % e)\n\n try:\n gpustore = GPUStore('stats')\n gpustore.save()\n except:\n e = sys.exc_info()[0]\n log.error(\"Unable to save GPU stats. %s\" % e)\n\n try:\n poolstore = MinerPoolStore('stats')\n poolstore.save()\n except:\n e = sys.exc_info()[0]\n log.error(\"Unable to save Miner pool stats. 
%s\" % e)\n\n try:\n minersummary = MinerSummaryStore('summary')\n minersummary.save()\n except:\n e = sys.exc_info()[0]\n log.error(\"Unable either to acquire or to save Miner Summary stats\")\n","repo_name":"wilbur-d/ltc_stats","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27814118886","text":"\nimport os \n\n\nroot = os.path.dirname(__file__)\n\n\n\nconfigs = dict(\n static_path=os.path.join(root, \"static\"),\n template_path=os.path.join(root, \"templates\"),\n debug=True,\n xsrf_cookies=True,\n cookie_secret='03df65effda044bca54b8b59b5d2d03e'\n)\n\n\nmysql_configs = dict(\n db_host=\"ec2-54-172-175-251.compute-1.amazonaws.com\",\n db_name=\"d8nt0t0evtddcp\",\n db_port=5432,\n db_user=\"ocklswkhlicvau\",\n db_pwd=\"0ef0864eb874ca4af692ff36d91ed54c5ca6bf25f7d5e10f84e3cda62b59a3c8\"\n)\n","repo_name":"JessJiayi/Shorten-URL-Generator","sub_path":"app/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10770768467","text":"import calendar\nimport datetime as dt\nimport requests\nimport sqlite3\nimport time\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom helpers import ScrapeArchive, ScrapeArticle\n\n\ndef main():\n # Search string\n search_string = \"klima\"\n\n # Url\n url = \"https://www.tagesschau.de/ausland/europa/egmr-klimaklagen-100.html\"\n url = \"https://www.tagesschau.de/inland/innenpolitik/kindergrundsicherung-paus-102.html\"\n\n if re.search(\"^https?://\", url):\n print(\"secure\")\n else:\n print(\"insecure or invalid\")\n\n\n # Request url and get bs4soup\n r = requests.get(url)\n if r.status_code != 200:\n raise ValueError\n soup = BeautifulSoup(r.text, 'html.parser') \n\n # Solved\n shorttext_raw = soup.find('p', class_=re.compile('^textabsatz'))\n try:\n print(shorttext_raw.strong.text.strip())\n except AttributeError:\n try:\n print(shorttext_raw.text.strip())\n except:\n print(\"No Shorttext\")\n\n\n # Solved\n taglist_raw = soup.find('ul', class_='taglist')\n tags_list = []\n try:\n tag_link_list_raw = taglist_raw.find_all('a')\n except AttributeError:\n print(tags_list)\n else:\n for tag in tag_link_list_raw:\n tags_list.append(tag.text.strip())\n\n\n seitenkopf = soup.find('div', class_=re.compile('seitenkopf'))\n print(seitenkopf)\n topline_label_raw = seitenkopf.find('span', class_=re.compile('label--small'))\n print(type(topline_label_raw))\n if topline_label_raw == None:\n print(\"no label found\")\n else:\n print(topline_label_raw.strong.text.strip())\n\n\n if soup.find('article') == None:\n print(\"no article tag\")\n #tag_article = soup.find('article')\n #print(tag_article)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Guazonuka/cs50p_finalproject","sub_path":"single_article.py","file_name":"single_article.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23148238653","text":"from django.shortcuts import render, get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import SafariPackages, DayNumber, Category\n\n# Create your views here.\ndef safaripackages_view(request):\n\tall_safaripackages = SafariPackages.objects.all()\n\tall_categories = 
Category.objects.all()\n\n#Paginator\n\n\tpaginator = Paginator(all_safaripackages, 12)\t\n\tpage = request.GET.get('page')\t\t\n\ttry:\n\t\tall_safaripackages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tall_safaripackages = paginator.page(1)\n\texcept EmptyPage:\n\t\tall_safaripackages = paginator.page(paginator.num_pages)\n\n\n\tcontext = {\n\t'all_safaripackages': all_safaripackages,\n\t'all_categories':all_categories\n\t}\n\n\n\treturn render (request, \"./safaripackages/safaripackages.html\", context)\n\ndef single_safaripackage(request, post_id):\n\tsafaripackage = SafariPackages.objects.get(pk=post_id)\n\tday = DayNumber.objects.filter(daydetails_id = post_id)\n\treturn render (request, './safaripackages/single_safaripackage.html', {'safaripackage':safaripackage,'day':day})\n\n\ndef safaripackage_categories (request, slug):\n\tcategory = Category.objects.get(slug=slug)\n\tall_categories = Category.objects.all()\n\tcontext = {'category':category, 'all_categories':all_categories}\n\treturn render (request, './safaripackages/safaricategory.html', context)","repo_name":"Kimoney/WildPortraitSafaris","sub_path":"safaripackages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73671046131","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Bianchi.TTHStudies.mem_categories_cff import *\n\nprocess = cms.Process(\"datacardMaker\")\n\nprocess.fwliteInput = cms.PSet(\n\n name = cms.string(\"New\"),\n version= cms.string(\"_CSVcalibration_rec_std\"),\n extraname= cms.string(\"\"),\n fname= cms.string(\"MEM\"),\n inputpath= cms.string(\"gsidcap://t3se01.psi.ch:22128//pnfs/psi.ch/cms/trivcat/store/user/bianchi/Trees/MEM/Mar28_2014/ntuplizeAll/\"),\n directory= cms.string(\"Mar25_2014\"),\n cut= cms.string(\"(numJets>=6 && numBTagM==3)\"),\n category= cms.string(\"lepton_pt\"),\n varname = cms.string(\"lepton_pt\"),\n doMEM= cms.int32(4),\n fact1= cms.double(-99),\n fact2= cms.double(-99),\n factbb= cms.double(-99),\n lumiScale= cms.double(19.6/12.1),\n nBins= cms.int32(15),\n nBinsY= cms.untracked.int32(1),\n splitFirstBin= cms.int32(0),\n binvec = cms.vdouble(30., 40, 50., 60., 70., 80., 90., 100., 110., 120., 130., 140., 150., 160., 170., 180.), \n binvecY= cms.vdouble(0.,1.),\n #samples= cms.vstring( \"TTV\", \"SingleT\", \"DiBoson\", \"TTJetsBB\",\n # \"TTJetsBJ\", \"TTJetsJJ\", \"TTH125\", \"EWK\",\n # \"Run2012_SingleMu\", \"Run2012_SingleElectron\"),\n samples= cms.vstring(\"SingleT\"),\n nparts= cms.int32(2),\n part = cms.int32(1),\n analysis = cms.untracked.int32(-1),\n doSystematics = cms.untracked.int32(1),\n )\n\n\n\n#process.fwliteInput = cat1_sb\n","repo_name":"bianchini/TTHStudies","sub_path":"python/datacardMakerFWlite.py","file_name":"datacardMakerFWlite.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2290425197","text":"import argparse\nfrom pathlib import Path\n\nfrom omnigpt4.utils.download import download\n\n\ndef download_metadata(output_dir: Path):\n for i in range(0, 23099):\n url = f\"https://storage.googleapis.com/ai2-jackh-mmc4-public/data/docs_no_face_shard_{i}_v2.jsonl.zip\"\n try:\n download(url, save_path=output_dir / f\"docs_no_face_shard_{i}_v2.jsonl.zip\")\n except Exception as e:\n print(e, i)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--cache_dir\", type=str, 
default=\"./.cache\")\n parser.add_argument(\"--output_dir\", type=str, default=\"./data\")\n args = parser.parse_args()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"omni-gpt/OmniGPT4","sub_path":"omnigpt4/data/utils/prepare_mmc4.py","file_name":"prepare_mmc4.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24017829400","text":"\"\"\" Common setings for peer-edit \"\"\"\n\nimport os\nimport logging\n\n# Normally you should not import ANYTHING from Django directly\n# into your settings, but ImproperlyConfigured is an exception.\nfrom django.core.exceptions import ImproperlyConfigured\n\ndef get_env_setting(setting):\n \"\"\" Get the environment setting or return exception \"\"\"\n try:\n return os.environ[setting]\n except KeyError:\n error_msg = \"Set the %s env variable\" % setting\n raise ImproperlyConfigured(error_msg)\n\n# Your project root\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\")\n\nSUPPORTED_NONLOCALES = ['media', 'admin', 'static']\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# Defines the views served for root URLs.\nROOT_URLCONF = 'peer_edit.urls'\n\n# Application definition\nINSTALLED_APPS = (\n # Django contrib apps\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n\n # Database migrations\n 'south',\n\n # Local apps, referenced via appname\n 'edit',\n\n # Amazon S3\n 'storages',\n)\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.example.com/media/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.example.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = ''\n\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.example.com/static/\"\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')\n\n# URL prefix for static files.\n# Example: \"http://media.example.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(PROJECT_ROOT, 'peer_edit/static'),\n)\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or\n # \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\n\ndef custom_show_toolbar(request):\n \"\"\" Only show the debug toolbar to users with the superuser flag. \"\"\"\n return request.user.is_superuser\n\n# Specify a custom user model to use\n#AUTH_USER_MODEL = 'accounts.MyUser'\n\n# The WSGI Application to use for runserver\nWSGI_APPLICATION = 'peer-edit.wsgi.application'\n\n# Define your database connections\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.',\n 'NAME': '',\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n #'OPTIONS': {\n # 'init_command': 'SET storage_engine=InnoDB',\n # 'charset' : 'utf8',\n # 'use_unicode' : True,\n #},\n #'TEST_CHARSET': 'utf8',\n #'TEST_COLLATION': 'utf8_general_ci',\n },\n # 'slave': {\n # ...\n # },\n}\n\n# Uncomment this and set to all slave DBs in use on the site.\n# SLAVE_DATABASES = ['slave']\n\n# Recipients of traceback emails and other notifications.\nADMINS = (\n ('noah', 'noahm@moroze.com'),\n)\nMANAGERS = ADMINS\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# Debugging displays nice error messages, but leaks memory. 
Set this to False\n# on all server instances and True only for development.\nDEBUG = TEMPLATE_DEBUG = False\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['*']\n\n# SECURITY WARNING: keep the secret key used in production secret!\n# Hardcoded values can leak through source control.\n# This is an example method of getting the value from an environment setting.\n# Uncomment to use, and then make sure you set the SECRET_KEY environment variable.\n# This is good to use in production, and on services that support it such as Heroku.\nassert 'DJANGO_SECRET_KEY' in os.environ, 'Set DJANGO_SECRET_KEY'\nSECRET_KEY = get_env_setting('DJANGO_SECRET_KEY')\n\n# Uncomment these to activate and customize Celery:\n# CELERY_ALWAYS_EAGER = False # required to activate celeryd\n# BROKER_HOST = 'localhost'\n# BROKER_PORT = 5672\n# BROKER_USER = 'django'\n# BROKER_PASSWORD = 'django'\n# BROKER_VHOST = 'django'\n# CELERY_RESULT_BACKEND = 'amqp'\n\n# Enable these options for memcached\n#CACHE_BACKEND= \"memcached://127.0.0.1:11211/\"\n#CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True\n\n# Set this to true if you use a proxy that sets X-Forwarded-Host\n#USE_X_FORWARDED_HOST = False\n\n## Log settings\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# Common Event Format logging parameters\n#CEF_PRODUCT = 'fogpad'\n#CEF_VENDOR = 'Your Company'\n#CEF_VERSION = '0'\n#CEF_DEVICE_VERSION = '0'\n","repo_name":"nmoroze/peer-edit","sub_path":"peer_edit/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6986,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"74869275252","text":"\"\"\"\nGUI elements for interaction via CV.\n\"\"\"\n\n# Python Libraries\nfrom typing import Tuple, Union\n\n# Third-Party Libraries\nimport cv2\nfrom shapely.geometry import Point\n\n# Local Files\nfrom constants import BPM_SUBDIVISIONS\nfrom geometry_utility import create_rectangle_array, point_intersects, polygon_bounds\n\n\nclass PlusMinusButtons:\n \"\"\"\n Base Class for Button-based GUI elements.\n \"\"\"\n def __init__(\n self, x: int, y: int,\n label: str = \"Label\", label_offset_x: int = 50,\n min_value: int = 1, max_value: int = 100,\n text_color: Tuple[int, int, int] = (255, 255, 255),\n btm_text_color: Tuple[int, int, int] = (4, 201, 126),\n back_color: Tuple[int, int, int] = (255, 255, 255),\n ) -> None:\n\n # Screen Coordinates.\n self.top_left = (x, y)\n self.bottom_right = (x + 50, y + 50) # Length and Height\n\n # Button Design.\n self.label = label # Button Label\n self.text_color = text_color # Text color for label\n self.btm_text_color = btm_text_color # Text color for botton\n self.back_color = back_color # BG color\n self.label_offset_x = self.top_left[0] + label_offset_x # Distance from button\n\n # Create a bounding boxes to detect collisions against the buttons.\n self.minus_bounding_box = create_rectangle_array(\n self.top_left, self.bottom_right)\n self.plus_bounding_box = create_rectangle_array(\n (self.top_left[0] + 100, 
self.top_left[1]),\n (self.bottom_right[0] + 100, self.bottom_right[1])\n )\n\n # Set range of GUI element.\n self.min_value = min_value\n self.max_value = max_value\n\n self.value = None\n\n def set_value(self, value: int) -> None:\n \"\"\" Update current value if does not exceed min and max values. \"\"\"\n if self.max_value >= value >= self.min_value:\n self.value = int(value)\n\n def set_max_value(self, value: int) -> None:\n \"\"\" Set upper bound of the button selector. \"\"\"\n self.max_value = value\n if self.value > self.max_value:\n self.value = self.max_value\n\n def render(self, img):\n \"\"\" Render button on top of the camera captured image. \"\"\"\n x_1, y_1 = self.top_left\n x_2, y_2 = self.bottom_right\n\n # Create the minus button rectangle.\n cv2.rectangle(\n img, self.top_left, self.bottom_right,\n self.back_color, cv2.FILLED\n )\n\n # Add the 'minus' sign text.\n # The order of drawing sets the display order.\n cv2.putText(\n img, \"-\", (x_1 + 12, y_1 + 35),\n cv2.FONT_HERSHEY_SIMPLEX, 1, self.btm_text_color,\n 2, cv2.LINE_AA\n )\n\n # Create the plus button rectangle.\n cv2.rectangle(\n img, (x_1 + 100, y_1), (x_2 + 100, y_2),\n self.back_color, cv2.FILLED,\n )\n\n # Add the 'plus' sign text.\n cv2.putText(\n img, \"+\", (x_1 + 112, y_1 + 35),\n cv2.FONT_HERSHEY_SIMPLEX, 1, self.btm_text_color,\n 2, cv2.LINE_AA\n )\n\n # Draw the label of the control.\n cv2.putText(\n img, self.label, (self.label_offset_x, y_2),\n cv2.FONT_HERSHEY_SIMPLEX, 1, self.text_color,\n 2, cv2.LINE_AA\n )\n\n # Draw the currently selected value\n cv2.putText(\n img, str(self.value), (x_2 + 150, y_2),\n cv2.FONT_HERSHEY_SIMPLEX, 1, self.text_color,\n 2, cv2.LINE_AA\n )\n\n # Return drawn controls overlaid on the image.\n return img\n\n def minus_btn_check_collision(self, x: int, y: int) -> Union[bool, None]:\n \"\"\"\n Processes events for the minus botton collision (i.e., the\n intersection between a finger landmark and the button).\n \"\"\"\n # Ensure that decreasing the value would not exceed minumum.\n if self.min_value < self.value:\n point = Point(x, y)\n # Decrease value if there was a collision.\n if point_intersects(point, self.minus_bounding_box):\n self.set_value(self.value - 1)\n return True\n\n return False\n\n def plus_btn_check_collision(self, x: int, y: int) -> Union[bool, None]:\n \"\"\"\n Processes events for the plus botton collision (i.e., the\n intersection between a finger landmark and the button).\n \"\"\"\n # Ensure that increasing the value would not exceed maximum.\n if self.max_value > self.value:\n # Convert coordinates into a point.\n point = Point(x, y)\n # Increase value if there was a collision.\n if point_intersects(point, self.plus_bounding_box):\n self.set_value(self.value + 1)\n return True\n\n return False\n\n\nclass SubdivisionsButtons(PlusMinusButtons):\n \"\"\"\n This class overrides the 'set_value()' method in order to map the\n subdivision values to the value declared in the 'BPM_SUBDIVISIONS' dict.\n The plus and minus buttons will change the selected dictionary value.\n \"\"\"\n def init_value(self, value: int) -> None:\n \"\"\"\n Initialize value to prevent an NoneType error when using the setter.\n \"\"\"\n self.value = value\n\n def set_value(self, value: int) -> None:\n if value > self.value:\n # The values here increase only by one step. 
Since the\n # values are pulled from a dictionary, the key\n # is the value of that can be selected\n if self.value in BPM_SUBDIVISIONS:\n keys = list(BPM_SUBDIVISIONS)\n index = keys.index(self.value)\n if index + 1 < len(keys):\n self.value = keys[index + 1]\n\n elif value < self.value:\n if self.value in BPM_SUBDIVISIONS:\n keys = list(BPM_SUBDIVISIONS)\n index = keys.index(self.value)\n if index - 1 >= 0:\n self.value = keys[index - 1]\n\n\nclass Menu:\n \"\"\" Menus are lists of items and it is mainly used for the scales. \"\"\"\n def __init__(\n self, x: int, y: int,\n menu_dictionary: dict,\n alpha: float = 0.7,\n btm_text_color: Tuple[int, int, int] = (0, 255, 0),\n columns: int = 1, rows: int = 2\n ) -> None:\n self.start_coords = (x, y)\n self.alpha = alpha # Opacity.\n self.btm_text_color = btm_text_color\n self.columns = columns\n self.rows = rows\n self.menu_items = menu_dictionary\n self.menu_items_coordinates = {}\n\n self.value = None\n\n self._init_menu_items_coordinates()\n\n def get_value(self) -> str:\n \"\"\"\n Return the scale name to update the synth if it changed.\n The name of the scale is contained in the first value of the tuple.\n \"\"\"\n return self.value[0]\n\n def init_value(self, value: str) -> None:\n \"\"\" Store the name of the scale and the coordinates as a tuple. \"\"\"\n self.value = (value, self.menu_items_coordinates[value])\n\n def _set_value(self, value: Tuple) -> None:\n \"\"\" Update the chosen scale and coordinates tuple. \"\"\"\n self.value = value\n\n def check_collision(self, x: int, y: int) -> Union[bool, None]:\n \"\"\" Check for collision against menu boxes. \"\"\"\n # Process collisions with menu items.\n for k, v in self.menu_items_coordinates.items():\n if point_intersects((x, y), v):\n self._set_value((k, v))\n return True\n\n return False\n\n def render(self, img):\n \"\"\" Render function for the whole menu. \"\"\"\n for item in self.menu_items.keys():\n overlay = self.render_item(\n self.menu_items_coordinates[item][0],\n self.menu_items_coordinates[item][2],\n item, img\n )\n\n image_new = cv2.addWeighted(\n overlay, self.alpha, img, 1 - self.alpha, 0\n )\n\n return image_new\n\n def render_item(\n self,\n btm_left: Tuple[int, int], top_right: Tuple[int, int],\n item: str, overlay_img\n ):\n \"\"\" Render function for the individual boxes. 
\"\"\"\n if item in self.value:\n cv2.rectangle(\n overlay_img,\n # The item 1 of the value tuple are the coordinates\n self.value[1][0],\n self.value[1][2],\n (255, 255, 255),\n cv2.FILLED,\n )\n\n cv2.putText(\n overlay_img,\n item,\n (btm_left[0], btm_left[1] - 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n self.btm_text_color,\n 2,\n cv2.LINE_AA,\n )\n\n cv2.rectangle(\n overlay_img, btm_left, top_right, self.btm_text_color, 1\n )\n\n return overlay_img\n\n def _init_menu_items_coordinates(self):\n \"\"\"\n Add screen coordinates for each element in the menu.\n \"\"\"\n # Init X and Y for creating menu items\n x, y = self.start_coords\n\n row = 0\n column = 0\n for item in self.menu_items.keys():\n if column < self.columns and row >= self.rows:\n row = 0\n x += 250\n y = self.start_coords[1]\n\n self.menu_items_coordinates[item] = create_rectangle_array(\n (x, y + 5), (x + 240, y - 50)\n )\n\n y += 70\n row += 1\n\n if row == self.rows - 1:\n column += 1\n\n\nclass Slider:\n \"\"\"\n This class creates a slider control where the data value falls inside\n the bounding rectangle.\n \"\"\"\n def __init__(\n self, bpm: int = 100, textlabel: str = \"BPM\",\n x: int = 1000, y: int = 140, min_value: int = 40, max_value: int = 220\n ) -> None:\n # Setting the text to be displayed before the control, to the left\n # of the slider\n self.bpm = bpm\n self.textlabel = textlabel\n # Intializing control layout coordinates\n self.top_left = (x, y)\n self.bottom_right = (x + 225, y + 50)\n\n self.min_value = min_value\n self.max_value = max_value\n\n self.bounding_box = create_rectangle_array(\n self.top_left, self.bottom_right\n )\n\n def set_bpm(self, bpm):\n \"\"\" Set the BPM slider based on the provided range. \"\"\"\n if int(bpm) < self.min_value:\n bpm = self.min_value\n if int(bpm) > self.max_value:\n bpm = self.max_value\n self.bpm = bpm\n\n def render(self, img):\n \"\"\" Draw the slider control. 
\"\"\"\n\n x_1, y_1 = self.top_left\n\n # Create a containing retangle\n cv2.rectangle(\n img, self.top_left, self.bottom_right, (192, 84, 80), 3\n )\n\n # Create a rectangle that displays the current setting\n cv2.rectangle(\n img, self.top_left, (int(self.bpm + x_1), self.bottom_right[1]),\n (255, 255, 255), cv2.FILLED)\n\n # Place label text the left of the containing rectangle\n cv2.putText(\n img, self.textlabel, (x_1 - 70, y_1 + 50),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),\n 2, cv2.LINE_AA\n )\n\n # Placing label text inside slider bar\n cv2.putText(\n img, str(int(self.bpm)), (x_1 + 10, y_1 + 40),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (4, 201, 126),\n 2, cv2.LINE_AA\n )\n\n return img\n\n def set_sliders(self, img, x_coord, y_coord):\n \"\"\"\n The method exists to checks to see if the user is\n trying to adjust the slider by intersection of the\n containing control\n\n This method takes the opencv image, but currently adds\n nothing to it.\n \"\"\"\n # Pickup BPM Control\n point = Point(x_coord, y_coord)\n\n if point_intersects(point, self.bounding_box):\n bounds = polygon_bounds(self.bounding_box)\n # The countrol needs to read the X1 boundary\n # to avoid hardcoding of values\n self.set_bpm(int(x_coord - bounds[0]))\n\n return img\n","repo_name":"dcardonab/STCV-Synth","sub_path":"lib/gui_assets.py","file_name":"gui_assets.py","file_ext":"py","file_size_in_byte":12275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75038713653","text":"import sys\n\ninput = sys.stdin.readline\nN = int(input())\nbuses = list(map(int,input().split()))\n\ndef solution(N, buses):\n answer = 0\n\n cnt = [[0] * N for _ in range(N+1)]\n for i in range(N-1, -1, -1):\n value = buses[i]\n for j in range(i+1):\n cnt[value][j] = 1\n \n for i in range(N):\n for j in range(2, N+1):\n cnt[j][i] += cnt[j-1][i]\n \n for i in range(N-1):\n for j in range(i+1, N):\n if buses[i] < buses[j]:\n answer += cnt[buses[i]][j]\n \n return answer\n\nprint(solution(N, buses))","repo_name":"Sh-IT0311/Coding-Test","sub_path":"softeer/통근버스 출발 순서 검증하기.py","file_name":"통근버스 출발 순서 검증하기.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18938227075","text":"from engine.question.QuestionChoice import QuestionChoice\nfrom engine.question.QuestionType import QuestionType as qt\n\n\nclass QuestionChoiceMultiple(QuestionChoice):\n\n def __init__(self, q_id, question, engine, id_next, labels, value, perfumes, answer_options):\n super().__init__(q_id,\n question,\n qt.MULTIPLE,\n engine,\n id_next,\n labels,\n value,\n perfumes,\n answer_options)\n\n def set_answer(self, answer_list):\n print(\"Setting answer QuestionChoiceMultiple: \", answer_list)\n\n if not isinstance(answer_list, list):\n print(\"QuestionChoiceMultiple should receive list of 1's and 0's\")\n exit(1)\n\n for index, a in enumerate(answer_list):\n if a == 1:\n self._update_ranks(self.labels[index], self.value[index], index)","repo_name":"Lonnekee/PerfumeRecommendations","sub_path":"engine/question/QuestionChoiceMultiple.py","file_name":"QuestionChoiceMultiple.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16143115503","text":"# -*- coding: UTF-8 -*-\nfrom com.android.monkeyrunner import MonkeyRunner,MonkeyDevice,MonkeyImage\ndevice = MonkeyRunner.waitForConnection()\n\ncomand = 
'com.android.dialer/.DialtactsActivity'\ndevice.startActivity(component=comand)\ndevice.drag((339,357),(1075,357),0.1,10)\n\n# 向下滑动极限\n# for i in range(1,30): \n# device.drag((180,180),(600,600),0.1,10)\n# MonkeyRunner.sleep(2)\n# print('dragUp:',i)\n\n# 向下滑动极限\n# for i in range(1,1001): \n# device.drag((400,400),(180,180),0.1,10)\n# MonkeyRunner.sleep(1)\n# device.drag((180,180),(600,600),0.1,10)\n# MonkeyRunner.sleep(1)\n# print('drag:',i) \n\n# 向上滑动极限\n# for i in range(1,1001): \n# device.drag((400,400),(180,180),0.1,10)\n# MonkeyRunner.sleep(1)\n# print('dragUp:',i) \n\n# for i in range(1,3):\n# MonkeyRunner.sleep(10)\n# device.touch(456,1605,'DOWN_AND_UP')\n# print(i)\n# MonkeyRunner.sleep(5)\n# device.reboot()\n\n# device.touch(100, 400, MonkeyDevice.DOWN) \n# device.touch(339, 1075, MonkeyDevice.MOVE)\n# device.touch(88, 656, MonkeyDevice.UP) \n","repo_name":"LTAND/monkeyrunner","sub_path":"huadong.py","file_name":"huadong.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1660771426","text":"import sys\ninput = sys.stdin.readline\nN, C = map(int, input().split())\ncards = [[] for _ in range(N + 1)]\nfor i in range(1, N + 1):\n _, *card = map(int, input().split())\n cards[i] = card\noperators = [[]] + [input().rstrip().split(',') for _ in range(C)]\nvisited = [0] * (N + 1)\nans = set()\n\ndef DFS(n, string):\n if n >= C:\n if string:\n ans.add(string)\n else:\n ans.add('EMPTY')\n return\n\n for i in range(1, N + 1):\n if visited[i] < len(cards[i]):\n next = string\n for k in operators[cards[i][visited[i]]]:\n op, s = k.split()\n if op == 'ADD':\n next += s\n else:\n j = int(s)\n if j >= len(next):\n ans.add('ERROR')\n break\n else:\n next = next[:j] + next[j + 1:]\n else:\n visited[i] += 1\n DFS(n + 1, next)\n visited[i] -= 1\nDFS(0, '')\nprint(*sorted(list(ans)), sep='\\n')","repo_name":"mintropy/algorithm_pulzo","sub_path":"지현배/2110/1021/21776.py","file_name":"21776.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"28501981391","text":"# Number of Islands\n\n# Given a 2d grid map of '1's (land) and '0's (water), count the number\n# of islands. An island is surrounded by water and is formed by\n# connecting adjacent lands horizontally or vertically. 
You may assume\n# all four edges of the grid are all surrounded by water.\n\n# Example 1:\n# Input:\n# 11110\n# 11010\n# 11000\n# 00000\n# Output: 1\n\n# Example 2:\n# Input:\n# 11000\n# 11000\n# 00100\n# 00011\n# Output: 3\n\nfrom typing import List\n\n\nclass Solution:\n def sink_island(self, row, col, rc, cc, grid):\n grid[row][col] = \"0\"\n if row > 0 and grid[row - 1][col] == \"1\":\n self.sink_island(row - 1, col, rc, cc, grid)\n if row < rc - 1 and grid[row + 1][col] == \"1\":\n self.sink_island(row + 1, col, rc, cc, grid)\n if col > 0 and grid[row][col - 1] == \"1\":\n self.sink_island(row, col - 1, rc, cc, grid)\n if col < cc - 1 and grid[row][col + 1] == \"1\":\n self.sink_island(row, col + 1, rc, cc, grid)\n\n def numIslands(self, grid: List[List[str]]) -> int:\n islands = 0\n rc = len(grid)\n if not rc: return 0\n cc = len(grid[0])\n if not cc: return 0\n\n for i in range(0, rc):\n for j in range(0, cc):\n if grid[i][j] == \"1\":\n islands +=1\n self.sink_island(i, j, rc, cc, grid)\n return islands\n\nt = Solution()\n\ninput = [[\"1\",\"1\",\"1\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"1\",\"0\"],[\"1\",\"1\",\"0\",\"0\",\"0\"],[\"0\",\"0\",\"0\",\"0\",\"0\"]]\nprint(\"1 = \", t.numIslands(input))\n\ninput = [[\"1\",\"1\",\"0\",\"0\",\"0\"],[\"1\",\"1\",\"0\",\"0\",\"0\"],[\"0\",\"0\",\"1\",\"0\",\"0\"],[\"0\",\"0\",\"0\",\"1\",\"1\"]]\nprint(\"3 = \", t.numIslands(input))\n\ninput = [[\"0\",\"0\",\"0\",\"0\",\"0\"],[\"0\",\"0\",\"0\",\"0\",\"0\"]]\nprint(\"0 = \", t.numIslands(input))\n\ninput = [[\"1\",\"1\",\"0\",\"1\",\"0\"]]\nprint(\"2 = \", t.numIslands(input))\n\ninput = [[\"1\"],[\"1\"],[\"0\"],[\"1\"],[\"0\"]]\nprint(\"2 = \", t.numIslands(input))\n\ninput = [[]]\nprint(\"0 = \", t.numIslands(input))\n\ninput = []\nprint(\"0 = \", t.numIslands(input))\n","repo_name":"DmitryVlaznev/leetcode","sub_path":"200-number-of-islands.py","file_name":"200-number-of-islands.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"25910540384","text":"import csv\nfrom random import randint\n\ndef read_data(data):\n try:\n with open(data, 'r') as f:\n data = [row for row in csv.reader(f.read().splitlines())]\n return data\n except NoneType:\n print(\"Wrong file\")\n\ndef compare_answer(random_color, answer):\n \"\"\" Compare the random color vs the given answer\n\n Parameters\n ----------\n random_color: str\n Random color\n answer: str\n given character\n\n Returns\n -------\n list\n retunrs a list of integers if found as index or False if it wasnt found the answer on the random color\n \"\"\"\n indexes = []\n for index in range(len(random_color)):\n if random_color[index] == answer:\n indexes.append(index)\n if len(indexes) > 0:\n return indexes\n else:\n return []\n\ndef main():\n file_name = input(\"Provide file name: \")\n data = read_data(file_name)\n data_len = (len(data[0]))\n while True:\n win = 0\n random_index = randint(0, (data_len - 1))\n random_color = data[0][random_index].upper()\n tries = len(random_color)\n print(\"-\" * tries)\n out_screen = \"-\" * tries\n while tries > 0:\n answer = input(\"Provide a letter: \").upper()\n if answer.isalpha() and answer in random_color:\n indexes = compare_answer(random_color, answer)\n for index in indexes:\n out_screen = out_screen[:index] + answer + out_screen[index+1:]\n else:\n tries -= 1\n\n if out_screen.find('-') == -1:\n print(\"You won! 
the color is correct: {}\".format(random_color))\n win = 1\n break\n print(out_screen)\n if not win:\n print(\"You lost!\")\n print(\"The correct color is {}\".format(random_color))\n continue_answer = input(\"Continue Y/N : \")\n if continue_answer.upper() == 'N':\n break\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"morrisunix/python","sub_path":"projects/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"336021661","text":"import sys\nfile = open('in.txt','r',encoding='UTF-8')\nsys.stdout = open('out.txt','w+',encoding='UTF-8')\n\nfor line in file.readlines():\n\tstr = line.split('|')\n\tfor i in range(len(str)):\n\t\tstr[i] = str[i].strip()\n\tf = str[1]\n\tt = str[2]\n\twidth = str[3]\n\toutput = '''\n\tregister #(%s) MEM_WB_%s (\n .clk(clk),\n .rst(FlushW),\n .en(~StallW),\n .d(MEM.%s),\n .q(WB.%s)\n\t);'''% (str[3],str[2],str[1],str[2])\n\tprint(output)\n\n\n\n","repo_name":"wyt2000/ustc-nscscc-2020-1","sub_path":"wyt/readTable.py","file_name":"readTable.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"7662247593","text":"#/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport smartcard.System\nfrom smartcard.CardMonitoring import CardMonitor, CardObserver\n\nMAP_MOIS = {\n \"JANV\" : \"01\",\n \"JAN\" : \"01\",\n \"FEVR\" : \"02\",\n \"FEV\" : \"02\",\n \"MARS\" : \"03\",\n \"MAR\" : \"03\",\n \"AVRI\" : \"04\",\n \"AVR\" : \"04\",\n \"MAI\" : \"05\",\n \"JUIN\" : \"06\",\n \"JUIL\" : \"07\",\n \"AOUT\" : \"08\",\n \"AOU\" : \"08\",\n \"SEPT\" : \"09\",\n \"SEP\" : \"09\",\n \"OCTO\" : \"10\",\n \"OCT\" : \"10\",\n \"NOVE\" : \"11\",\n \"NOV\" : \"11\",\n \"DECE\" : \"12\",\n \"DEC\" : \"12\"\n }\n\nID = [0x3F, 0x00, 0xDF, 0x01, 0x40, 0x31]\nADDRESS = [0x3F, 0x00, 0xDF, 0x01, 0x40, 0x33]\nPHOTO = [0x3F, 0x00, 0xDF, 0x01, 0x40, 0x35]\n\ndef scan_readers():\n return smartcard.System.readers()\n\ndef read_infos(device, read_photo = False):\n # TODO : manage exception\n cnx = device.createConnection()\n cnx.connect()\n\n # select file : informations\n # TODO : manage return codes\n cmd = [0x00, 0xA4, 0x08, 0x0C, len(ID)] + ID \n data, sw1, sw2 = _sendADPU(cnx, cmd)\n\n # read file\n cmd = [0x00, 0xB0, 0x00, 0x00, 256]\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n if \"%x\"%sw1 == \"6c\":\n cmd = [0x00, 0xB0, 0x00, 0x00, sw2]\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n idx = 0\n num_info = 0\n infos = []\n while num_info <= 12:\n num_info = data[idx]\n idx += 1\n len_info = data[idx]\n idx += 1\n chaine_bytes = []\n for x in range(len_info):\n chaine_bytes.append(data[idx])\n idx += 1\n try:\n infos.append(bytes(chaine_bytes).decode(\"utf-8\"))\n except UnicodeDecodeError:\n infos.append(u\"\")\n informations = {\n \"num_carte\" : infos[0],\n \"date_debut\" : infos[2].replace(\".\",\"/\"),\n \"date_fin\" : infos[3].replace(\".\",\"/\"),\n \"lieu_delivrance\" : infos[4],\n \"num_nat\" : infos[5],\n \"nom\" : infos[6],\n \"prenoms\" : infos[7],\n \"suffixe\" : infos[8],\n \"nationalite\" : infos[9],\n \"lieu_naissance\" : infos[10],\n \"date_naissance\" : infos[11].split()[0] + \"/\" + MAP_MOIS[infos[11].split()[1]] + \"/\" + infos[11].split()[2],\n \"sexe\" : infos[12],\n }\n\n # select file : adresse\n cmd = [0x00, 0xA4, 0x08, 0x0C, len(ADDRESS)] + ADDRESS\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n\n # read file\n cmd = [0x00, 0xB0, 0x00, 0x00, 
256]\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n if \"%x\"%sw1 == \"6c\":\n cmd = [0x00, 0xB0, 0x00, 0x00, sw2]\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n idx = 0\n num_info = 0\n infos = []\n while num_info <= 2:\n num_info = data[idx]\n idx += 1\n len_info = data[idx]\n idx += 1\n chaine_bytes = []\n for x in range(len_info):\n chaine_bytes.append(data[idx])\n idx += 1\n try:\n infos.append(bytes(chaine_bytes).decode(\"utf-8\"))\n except UnicodeDecodeError:\n infos.append(u\"\")\n\n informations[\"adresse\"] = infos[0]\n informations[\"code_postal\"] = infos[1]\n informations[\"localite\"] = infos[2]\n \n if read_photo:\n # select file : photo\n cmd = [0x00, 0xA4, 0x08, 0x0C, len(PHOTO)] + PHOTO\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n\n photo_bytes = []\n\n offset = 0\n while \"%x\"%sw1 == \"90\":\n cmd = [0x00, 0xB0, offset, 0x00, 256]\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n photo_bytes += data\n offset += 1\n if \"%x\"%sw1 == \"6c\":\n offset -= 1\n cmd = [0x00, 0xB0, offset, 0x00, sw2]\n data, sw1, sw2 = _sendADPU(cnx, cmd)\n photo_bytes += data\n \n photo = bytearray(photo_bytes)\n informations[\"photo\"] = photo\n \n return informations\n\ndef triggered_decorator(func):\n # print(\"Starting observation of reader %s\" % func.__defaults__[0])\n class SimpleObserver(CardObserver):\n reader = func.__defaults__[0]\n def update(self, observable, actions):\n added_cards, removed_cards = actions\n\n # all readers involved in actions\n action_readers = [c.reader for c in added_cards] + [c.reader for c in removed_cards]\n\n # if the one we observe is not the list, we return None\n if not self.reader in action_readers:\n return None\n\n # card added\n if len(added_cards):\n for c in added_cards:\n if c.reader == self.reader:\n return func(action=\"inserted\", card=c, reader=self.reader)\n\n # card removed\n if len(removed_cards):\n for c in removed_cards:\n if c.reader == self.reader:\n return func(action=\"removed\", card=c, reader=self.reader)\n return None\n # installing observer\n cm = CardMonitor()\n so = SimpleObserver()\n cm.addObserver(so)\n return so.update\n\ndef _sendADPU(cnx, apdu):\n response, sw1, sw2 = cnx.transmit(apdu)\n return response, sw1, sw2\n\n\nif __name__ == \"__main__\":\n # test\n for r in scan_readers():\n @triggered_decorator\n def auto_read(action, card, reader=r.name):\n print(\"\\n\", reader, \"\\n--------------------\\n\" )\n if action==\"inserted\":\n print(read_infos(card))\n else:\n print(action)\n\n input(\"Appuyez sur une touche pour terminer\\n\")\n","repo_name":"Lapin-Blanc/python-beid","sub_path":"beid/beid.py","file_name":"beid.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"2965617887","text":"import os\nimport numpy as np\n\ntry:\n from tensorflow.keras.preprocessing.image import NumpyArrayIterator\nexcept (ImportError, ModuleNotFoundError):\n from keras.preprocessing.image import NumpyArrayIterator\n\nfrom neuralNetwork.import_keras import ImageDataGeneratorOrig, NumpyArrayIteratorPre, K, \\\n array_to_img\nfrom data.conversion_tools import y2bool_annot\nfrom data.image_tools import ImgExt\n\n# TODO this seems ugly import\nfrom keras_preprocessing.image import np as np_random\n\n\ndef get_flow(x, y,\n batch_size=32,\n w_patch=10,\n w_ext_in = 0,\n ):\n \n datagen = SegmentationDataGenerator(\n horizontal_flip=True,\n vertical_flip=True,\n diagonal_flip=True,\n )\n\n flow = datagen.flow(x, y, y2bool_annot(y), w_in=w_patch + w_ext_in,\n 
w_out=w_patch, batch_size=batch_size)\n\n \n return flow\n\n\n\ndef get_class_imbalance(flow,\n n_batches=1000,\n verbose=1):\n from keras.preprocessing.image import Iterator\n assert isinstance(flow, Iterator)\n \n n_tot = 0\n n_0 = 0\n n_1 = 0\n \n for _ in range(n_batches):\n _, y_i = flow.next()\n \n assert y_i.shape[-1] == 2, 'Not implemented for more than 2 classes'\n \n y_i_0 = y_i[..., 0]\n y_i_1 = y_i[..., 1]\n \n n_tot += y_i_0.size\n n_0 += y_i_0.sum()\n n_1 += y_i_1.sum()\n \n f_0 = n_0 / (n_0 + n_1)\n \n # Fraction of class 1, should ideally be 50%\n f_1 = n_1 / (n_0 + n_1)\n \n f = (f_0, f_1)\n \n if verbose:\n \n f_str = ' '.join(f'{f_i:.4f}' for f_i in f)\n f_pro_str = ' '.join(f'{100*f_i:.2f}%' for f_i in f)\n \n print('class imbalance:\\n'\n f'\\tf = ({f_str}) = ({f_pro_str})\\n'\n f'\\tn_0/n_1: {n_0 / n_1:.2f}')\n \n print(f'fraction annotated = {(n_0 + n_1) / n_tot:.4f} = {100 * (n_0 + n_1) / n_tot:.2f}%')\n \n return f\n\ndef get_class_weights(flow,\n n_batches=1000,\n verbose=1):\n \n f = get_class_imbalance(flow, n_batches=n_batches, verbose=verbose)\n\n class_weight = tuple(1./(2*f_i) for f_i in f)\n \n assert np.dot(class_weight, f) == 1.\n \n if verbose:\n print(f'class weights = {class_weight}')\n\n return class_weight\n\n\n# Extension of original keras imagedatagenerator\nclass ImageDataGenerator(ImageDataGeneratorOrig):\n \"\"\"\n diagonal_flip: whether to randomly flip images diagonally (use only for square images).\n \"\"\"\n \n def __init__(self,\n diagonal_flip=False, # new addition (to include all 8 basic augmentations\n **kwargs\n ):\n super().__init__(**kwargs)\n self.diagonal_flip = diagonal_flip\n \n def random_transform(self, x, seed=None):\n \"\"\"Randomly augment a single image tensor.\n\n # Arguments\n x: 3D tensor, single image.\n seed: random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n \"\"\"\n \n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n \n x = super().random_transform(x, seed=seed)\n \n if self.diagonal_flip:\n if np.random.random() < 0.5:\n x = _flip_diagonal(x, img_row_axis, img_col_axis)\n \n return x\n\n\nclass NumpyArrayCropIterator(NumpyArrayIterator):\n \"\"\"Iterator yielding cropped data from a Numpy array.\n\n Arguments\n x: list of Numpy arrays of input data.\n y: list of Numpy arrays of targets data.\n image_data_generator: Instance of `SegmentationDataGenerator`\n to use for random transformations and normalization.\n mask: a binary 3D performance_map with all the pixels that can be cropped around\n w_in: integer, width of input patch\n w_out: integer, width of output patch\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. 
This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in SegmentationDataGenerator.\n \"\"\"\n def __init__(self, x, y,\n image_data_generator,\n mask,\n w_in,\n w_out,\n batch_size=32, shuffle=False, seed=None, data_format=None,\n save_to_dir=None, save_prefix='', save_format='png', subset=None):\n \n # If single image, convert it to a list of length one\n if isinstance(x, np.ndarray):\n assert isinstance(y, np.ndarray)\n assert isinstance(mask, np.ndarray)\n x = [x]\n y = [y]\n mask = [mask]\n \n assert isinstance(x, (list, tuple))\n assert isinstance(y, (list, tuple))\n \n if y is not None:\n len_x, len_y, len_mask = len(x), len(y), len(mask)\n \n if len_x != len_y:\n raise ValueError('`x` (images tensor) and `y` (labels) '\n 'should have the same length. '\n 'Found: len(x) = %s, len(y) = %s' %\n (len_x, len_y))\n if len_x != len_mask:\n raise ValueError('`x` (images tensor) and `mask` (mask) '\n 'should have the same length. '\n 'Found: len(x) = %s, len(y) = %s' %\n (len_x, len_mask))\n \n for i in range(len_x):\n # assert x[i]\n assert isinstance(x[i], np.ndarray)\n assert isinstance(y[i], np.ndarray)\n assert isinstance(mask[i], np.ndarray)\n \n assert np.asarray(x[i]).shape[:-1] == np.asarray(y[i]).shape[:-1], (\n '`x` (images tensor list) and `y` (labels list) '\n 'should have the same shape. '\n 'Found: x[i].shape = %s, y[i].shape = %s' %\n (np.asarray(x[i]).shape[:-1], np.asarray(y[i]).shape[:-1]))\n \n if data_format is None:\n data_format = K.image_data_format()\n \n self.x = [np.asarray(x_i, dtype=K.floatx()) for x_i in x]\n \n for i in range(len(self.x)):\n x_i = self.x[i]\n if x_i.ndim != 3:\n raise ValueError('Input data in `NumpyArrayIterator` '\n 'should be list of arrays with rank 3. You passed an array '\n 'with shape', i, ':', x_i.shape)\n \n if y is not None:\n self.y = [np.asarray(y_i, dtype=K.floatx()) for y_i in y]\n \n for i in range(len(self.y)):\n y_i = self.y[i]\n \n assert y_i.shape[:2] == mask[i].shape, ('`y` (labels image) and `mask` (mask labels) '\n 'should have the same shape (except last channel). '\n 'Found: y.shape = %s, mask.shape = %s' %\n (y_i.shape, np.asarray(mask[i]).shape))\n \n assert x_i.ndim == 3, ('Output data in `NumpyArrayIterator` '\n 'should have rank 4. 
You passed an array '\n 'with shape', i, ':', y_i.shape)\n \n else:\n self.y = None\n \n self.image_data_generator = image_data_generator\n self.data_format = data_format\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n \n if self.y is not None:\n self.xy_list = [np.concatenate([x_i, y_i], axis=-1) for x_i, y_i in zip(self.x, self.y)]\n else:\n self.xy_list = self.x\n \n self.xy_ext = [ImgExt(xy_i, ext=w_in // 2) for xy_i in self.xy_list]\n \n self.mask = mask\n self.w_in = w_in\n self.w_out = w_out\n \n assert self.w_in >= self.w_out, 'w_in should be >= w_out: {} vs {}'.format(self.w_in, self.w_out)\n \n if subset is not None:\n raise NotImplementedError(\n 'Given the current implementation it is not possible to automatically split training and validation.'\n 'This is because there is no standard way to split segmentation images with low amount of samples.')\n \n if self.data_format != 'channels_last':\n raise NotImplementedError('channels last only!', self.data_format)\n \n n = sum([np.count_nonzero(mask_i) for mask_i in mask])\n\n from keras_preprocessing.image.numpy_array_iterator import NumpyArrayIterator as NumpyArrayIteratorPre\n super(NumpyArrayIteratorPre, self).__init__(n, batch_size, shuffle, seed)\n \n # Overwrite\n def _set_index_array(self):\n \n index_array_lst = []\n for i in range(len(self.mask)):\n mask_i = self.mask[i]\n \n a = np.transpose(np.nonzero(mask_i))\n b = np.zeros((a.shape[0], 1), dtype=a.dtype)\n b[:] = i\n c = np.concatenate([b, a], axis=1)\n index_array_lst.append(c)\n \n self.index_array = np.concatenate(index_array_lst, 0)\n \n if self.shuffle:\n np_random.random.shuffle(self.index_array) # only first axis is shuffled\n \n # updated\n def _get_batches_of_transformed_samples(self, index_array):\n \n w_in, w_out = self.w_in, self.w_out\n \n batch_x = np.zeros(tuple([len(index_array), w_in, w_in, self.x[0].shape[-1]]),\n dtype=K.floatx())\n \n if self.y is not None:\n batch_y = np.zeros(tuple([len(index_array), w_out, w_out, self.y[0].shape[-1]]),\n dtype=K.floatx())\n else:\n # TODO if y not given??? 
Might not need it\n \n raise NotImplementedError('y should be non-zero')\n \n if self.y is not None:\n f_x, f_y = self.x[0].shape[-1], self.y[0].shape[-1]\n else:\n f_x, f_y = self.x[0].shape[-1], 0\n \n xy_list = self.xy_list\n xy_ext_list = self.xy_ext\n \n for i, i_co in enumerate(index_array):\n i_image, i_h, i_w = i_co\n \n # precrop\n ext0 = (w_in) // 2\n \n # h0 = i_h - ext0\n # h1 = i_h + ext0 + 1\n # w0 = i_w - ext0\n # w1 = i_w + ext0 + 1\n # xy = xy_list[i_image][h0:h1, w0:w1, :]\n \n xy = xy_ext_list[i_image].get_crop(i_h, i_w, 1)\n \n xy = self.image_data_generator.random_transform(xy.astype(K.floatx()))\n \n w_precrop = ext0 * 2 + 1 # uneven\n h0_x = (w_precrop - w_in) // 2\n h1_x = w_in + h0_x\n x = xy[h0_x:h1_x, h0_x:h1_x, :f_x]\n # only standardize on x\n x = self.image_data_generator.standardize(x)\n \n # # TODO, solve out of bounds issues\n # # TODO now with workaround...\n # shape_x = x.shape\n # batch_x[i, :shape_x[0], :shape_x[1], :] = x\n batch_x[i] = x\n \n # TODO does this take long to calculate???\n if self.y is not None:\n h0_y = (w_precrop - self.w_out) // 2\n h1_y = self.w_out + h0_y\n \n y = xy[h0_y:h1_y, h0_y:h1_y, f_x:]\n \n # # TODO, solve out of bounds issues\n # # TODO now with workaround...\n # shape_y = y.shape\n #\n # batch_y[i, :shape_y[0], :shape_y[1], :] = y\n batch_y[i] = y\n \n # ext0 = self.w_in // 2\n # ext1 = self.w_in - ext0\n # h0 = i_h - ext0\n # h1 = i_h + ext1\n # w0 = i_w - ext0\n # w1 = i_w + ext1\n #\n # x = self.x[i_image][h0:h1, w0:w1, :]\n # x = self.image_data_generator.random_transform(x.astype(K.floatx()))\n # x = self.image_data_generator.standardize(x)\n \n # if self.y is not None:\n # for i, i_co in enumerate(index_array):\n # i_image, i_h, i_w = i_co\n # ext0 = self.w_out // 2\n # ext1 = self.w_out - ext0\n # h0 = i_h - ext0\n # h1 = i_h + ext1\n # w0 = i_w - ext0\n # w1 = i_w + ext1\n # # TODO y should do the same transform if it is an image :s\n # # TODO idea: concatenate y to x, pass through transform and then split again and perhaps crop further.\n # y = self.y[i_image][h0:h1, w0:w1, :]\n # batch_y[i] = y\n \n if self.save_to_dir:\n for i, i_co in enumerate(index_array):\n i_image, i_h, i_w = i_co\n \n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index1}_{index2}_{index3}_{hash}.{format}'.format(prefix=self.save_prefix,\n index1=i_image,\n index2=i_h,\n index3=i_w,\n hash=np.random.randint(1e4),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n \n if self.y is None:\n return batch_x\n \n return batch_x, batch_y\n\n\nclass SegmentationDataGenerator(ImageDataGenerator):\n def __init__(self,\n validation_split=0.0, # new addition (to include all 8 basic augmentations\n **kwargs\n ):\n \n if validation_split:\n raise NotImplementedError(\n 'Given the current implementation it is not possible to automatically split training and validation.'\n 'This is because there is no standard way to split segmentation images with low amount of samples.')\n \n super().__init__(**kwargs)\n \n def flow(self, x, y, mask, w_in, w_out, batch_size=32, shuffle=True, seed=None,\n save_to_dir=None, save_prefix='', save_format='png', subset=None):\n \"\"\"Takes numpy data & label arrays, and generates batches of\n cropped/augmented/normalized data.\n\n Arguments\n x: data. Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, and in case\n of RGB data, it should have value 3.\n y: labels. 
Should have rank 4 and resolution as x.\n mask: a binary 3D performance_map with all the pixels that can be cropped around\n w_in: integer, width of input patch\n w_out: integer, width of output patch\n batch_size: int (default: 32).\n shuffle: boolean (default: True).\n seed: int (default: None).\n save_to_dir: None or str (default: None).\n This allows you to optionally specify a directory\n to which to save the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: str (default: `''`). Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: one of \"png\", \"jpeg\" (only relevant if `save_to_dir` is set). Default: \"png\".\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n\n Returns\n An Iterator yielding tuples of `(x, y)` where `x` is a numpy array of image data and\n `y` is a numpy array of corresponding labels.\n \"\"\"\n \n if subset is not None:\n raise NotImplementedError(\n 'Given the current implementation it is not possible to automatically split training and validation.'\n 'This is because there is no standard way to split segmentation images with low amount of samples.')\n \n return NumpyArrayCropIterator(\n x, y, self, mask,\n w_in, w_out,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n data_format=self.data_format,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n subset=subset)\n \n def fit(self):\n raise NotImplementedError('Does not work yet with the random cropping!')\n\n\ndef _flip_diagonal(x, img_row_axis, img_col_axis):\n # Transpose along(img_col_axis, img_row_axis)\n \n axes = np.arange(len(np.shape(x)))\n axes[img_row_axis] = img_col_axis\n axes[img_col_axis] = img_row_axis\n axes = tuple(axes)\n x = np.asarray(x).transpose(axes)\n \n return x\n","repo_name":"infelane/lamb-segmentation","sub_path":"preprocessing/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":18074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73888394292","text":"\"\"\"\nans = 0, it is the only rabit with this color\n\nans = 1, at least 2 rabitts with this color\nans = 1, can be same color as last one\n\nans = 2, at least 3 rabitts with this color, cannot be the same color as previous rabitts\nans = 2, can be same color as last one\nans = 2, can be same color as last one\n\nans = 2, cannot be same color as last one, must be different color\n at least another 3 rabitts with this color\n\"\"\"\n\nclass Solution:\n def numRabbits(self, answers: List[int]) -> int:\n d = {}\n count = 0\n for ans in answers:\n if ans == 0:\n count += 1\n else:\n # [2] => at least 3 rabbits with same color\n if ans not in d: \n d[ans] = 0\n count += ans + 1\n else:\n d[ans] += 1\n # [2,2,2] => at least 3 with same color\n # [2,2,2,2] => at least 6 rabbits with 2 colors\n if ans == d[ans]: \n del d[ans]\n return count\n\nif __name__ == '__main__':\n\ts = Solution()\n\tprint(s.numRabbits([1,1,2])) # 5\n\tprint(s.numRabbits([10,10,10])) # 11\n\n\n","repo_name":"xiaofanc/leetcode","sub_path":"0781-rabbits-in-forest.py","file_name":"0781-rabbits-in-forest.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28493212162","text":"'''\r\nEvaluation.\r\n\r\n'''\r\nimport numpy as np\r\nfrom skimage import morphology\r\n\r\n\r\ndef 
fast_hist(label_true, label_pred, n_class):\r\n '''Computational confusion matrix.\r\n -------------------------------------------\r\n | | p_cls_1 | p_cls_2 | .... |\r\n -------------------------------------------\r\n | gt_cls_1 | | | |\r\n -------------------------------------------\r\n | gt_cls_2 | | | |\r\n -------------------------------------------\r\n | .... | | | |\r\n -------------------------------------------\r\n '''\r\n # mask = (label_true >= 0) & (label_true < n_class)\r\n if len(label_true.shape) > 1:\r\n label_true = label_true.flatten()\r\n label_pred = label_pred.flatten()\r\n hist = np.bincount(\r\n n_class * label_true.astype(int) + label_pred,\r\n minlength=n_class ** 2,\r\n ).reshape(n_class, n_class)\r\n return hist\r\n\r\n\r\nclass runingScore(object):\r\n ''' Evaluation class '''\r\n def __init__(self, n_classes=2):\r\n self.n_classes = n_classes\r\n self.confusion_matrix = np.zeros((n_classes, n_classes), dtype=np.int64)\r\n\r\n def reset(self):\r\n ''' Reset confusion_matrix. '''\r\n self.confusion_matrix = np.zeros((self.n_classes, self.n_classes), dtype=np.int64)\r\n\r\n def update_all(self, label_trues, label_preds):\r\n ''' Add new pairs of predicted label and GT label to update the confusion_matrix.\r\n Note: Only suitable for segmentation\r\n '''\r\n for lt, lp in zip(label_trues, label_preds):\r\n self.confusion_matrix += fast_hist(lt, lp, self.n_classes)\r\n\r\n def print_score(self, score, mode=0):\r\n ''' Print the score dict.\r\n mode-0: print the final total scores\r\n mode-1: print per pair of data's scores\r\n '''\r\n str_score = ''\r\n for key in score:\r\n if 'Class' in key:\r\n value_str = ','.join('%.4f' % i for i in score[key])\r\n else:\r\n value_str = '%.4f' % score[key]\r\n str_score += '%s,' % value_str if mode else key+': %s\\n' % value_str\r\n str_score = str_score.strip(',').strip() # discard the last suffix\r\n if mode == 0:\r\n print(str_score)\r\n\r\n return str_score\r\n\r\n\r\nclass RoadExtractionScore(runingScore):\r\n '''Accuracy evaluation for road extraction.\r\n Only two class: 0-bg, 1-road.\r\n '''\r\n\r\n def update(self, label_true, label_pred):\r\n '''Evaluate a new pair of predicted label and GT label,\r\n and update the confusion_matrix. '''\r\n hist = fast_hist(label_true, label_pred, self.n_classes)\r\n self.confusion_matrix += hist\r\n return self.get_scores(hist)\r\n\r\n def add(self, label_true, label_pred):\r\n '''Add a new pair of predicted label and GT label,\r\n update the confusion_matrix. '''\r\n hist = fast_hist(label_true, label_pred, self.n_classes)\r\n self.confusion_matrix += hist\r\n\r\n def get_scores(self, hist=None):\r\n \"\"\"Returns accuracy score evaluation result.\r\n - 1. Precision{ TP / (TP+FP) }\r\n - 2. Recall{ TP / (TP+FN) }\r\n - 3. F1score\r\n - 4. Class IoU\r\n - 5. Mean IoU\r\n - 6. 
FreqW Acc\r\n        \"\"\"\r\n        hist = self.confusion_matrix if hist is None else hist\r\n\r\n        # Take class 1-road as positive class:\r\n        TP = hist[1, 1]  # True Positive(road pixels are classified into road class)\r\n        FN = hist[1, 0]  # False Negative(road pixels are classified into bg class)\r\n        FP = hist[0, 1]  # False Positive(bg pixels are classified into road class)\r\n        # TN = hist[0, 0]  # True Negative(bg pixels are classified into bg class)\r\n\r\n        prec = TP / (TP + FP + 1e-8)  # Precision\r\n        rec = TP / (TP + FN + 1e-8)  # Recall\r\n        F1 = 2*TP / (2*TP + FP + FN + 1e-8)  # F1 Score\r\n\r\n        # IoU (tested)\r\n        cls_iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\r\n        mean_iu = np.nanmean(cls_iu)\r\n        # Frequency Weighted IoU(FWIoU): each class is weighted by its frequency of occurrence\r\n        freq = hist.sum(axis=1) / hist.sum()\r\n        fwavacc = (freq[freq > 0] * cls_iu[freq > 0]).sum()\r\n        # cls_iu = dict(zip(range(self.n_classes), iu))\r\n\r\n        return (\r\n            {\r\n                'Precision': prec,\r\n                'Recall': rec,\r\n                'F1score': F1,\r\n                'Class IoU': cls_iu,\r\n                'Mean IoU': mean_iu,\r\n                'FreqW Acc': fwavacc,\r\n            }  # Return as a dictionary\r\n        )\r\n\r\n    def keys(self):\r\n        score_keys = [\r\n            'Precision,Recall,F1score,Class IoU,Class IoU,Mean IoU,FreqW Acc'\r\n        ]  # note 'Class IoU'\r\n        return score_keys\r\n\r\n\r\nclass RelaxedRoadExtractionScore(runingScore):\r\n    '''Relax Accuracy evaluation for road extraction.\r\n    Only two class: 0-bg, 1-road.\r\n    '''\r\n    def __init__(self, rho=1):\r\n        self.rho = rho*2 + 1\r\n        self.confusion_matrix_p = np.zeros((2, 2), np.int64)  # For relaxed precision\r\n        self.confusion_matrix_r = np.zeros((2, 2), np.int64)  # For relaxed recall\r\n\r\n    def update(self, label_true, label_pred):\r\n        '''Evaluate a new pair of predicted label and GT label,\r\n        and update the confusion_matrix.'''\r\n        if self.rho > 1:\r\n            selem = morphology.square(self.rho, dtype=label_true.dtype)\r\n            tp_label_true = morphology.dilation(label_true, selem)\r\n            tp_label_pred = morphology.binary_dilation(label_pred, selem)\r\n            hist1 = fast_hist(tp_label_true, label_pred, 2)\r\n            hist2 = fast_hist(label_true, tp_label_pred, 2)\r\n        else:\r\n            hist = fast_hist(label_true, label_pred, 2)\r\n            hist1, hist2 = hist, hist\r\n\r\n        self.confusion_matrix_p += hist1\r\n        self.confusion_matrix_r += hist2\r\n        return self.get_scores(hist1, hist2)\r\n\r\n    def add(self, label_true, label_pred):\r\n        ''' Add new pairs of predicted label and GT label to update the confusion_matrix. '''\r\n        if self.rho > 0:\r\n            selem = morphology.square(self.rho, dtype=np.int64)\r\n            tp_lt = morphology.binary_dilation(label_true, selem)\r\n            tp_lp = morphology.binary_dilation(label_pred, selem)\r\n            self.confusion_matrix_p += fast_hist(tp_lt, label_pred, 2)\r\n            self.confusion_matrix_r += fast_hist(label_true, tp_lp, 2)\r\n        else:\r\n            hist = fast_hist(label_true, label_pred, 2)\r\n            self.confusion_matrix_p += hist\r\n            self.confusion_matrix_r += hist\r\n\r\n    def get_scores(self, hist_p=None, hist_r=None):\r\n        hist_p = self.confusion_matrix_p if hist_p is None else hist_p\r\n        hist_r = self.confusion_matrix_r if hist_r is None else hist_r\r\n\r\n        prec = hist_p[1, 1] / (hist_p[1, 1] + hist_p[0, 1] + 1e-8)  # Precision\r\n        rec = hist_r[1, 1] / (hist_r[1, 1] + hist_r[1, 0] + 1e-8)  # Recall\r\n        f1 = 2 * prec * rec / (prec + rec)\r\n        return (\r\n            {\r\n                \"Precision\": prec,\r\n                \"Recall\": rec,\r\n                \"F1score\": f1\r\n            }  # Return as a dictionary\r\n        )\r\n\r\n    def reset(self):\r\n        ''' Reset confusion matrices. 
'''\r\n self.confusion_matrix_p = np.zeros((2, 2), dtype=np.int64)\r\n self.confusion_matrix_r = np.zeros((2, 2), dtype=np.int64)\r\n","repo_name":"ErenTuring/SIINet","sub_path":"utils/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"41322311654","text":"import argparse, glob, fnmatch, os, csv, json, re\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas\nfrom scipy.interpolate import interp1d\nimport matplotlib as mpl\nimport matplotlib.style\nmpl.use('TkAgg')\nmpl.style.use('seaborn')\nimport matplotlib.pyplot as plt\n\ndef file_index_key(f):\n\tpattern = r'\\d+$'\n\tkey_match = re.findall(pattern, Path(f).stem)\n\tif len(key_match):\n\t\treturn int(key_match[0])\n\treturn f\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-p', '--prefix', type=str, nargs='+', default=[''], help='average all files starting with prefix')\n\tparser.add_argument('-r', '--results_path', type=str, nargs='+', default=[''], help='path to results directory')\n\tparser.add_argument('-l', '--label', type=str, nargs='+', default=[None], help='labels')\n\tparser.add_argument('-m', '--max_index', type=int, help='max index of prefix match to use')\n\tparser.add_argument('-a', '--alpha', type=float, default=0.9, help='alpha for emwa')\n\targs = parser.parse_args()\n\n\tprefix = args.prefix\n\tif len(prefix) != len(args.results_path):\n\t\tprefix = prefix * len(args.results_path)\n\n\tlabel = args.label\n\tif len(label) != len(args.results_path):\n\t\tlabel = label * len(args.results_path)\n\n\tmax_epoch = 0\n\tfor prefix, results_path, label in zip(prefix, args.results_path, label):\n\t\tpattern = '{}*.csv'.format(prefix)\n\n\t\tfilenames = fnmatch.filter(os.listdir(results_path), pattern)\n\t\tfilenames.sort(key=file_index_key)\n\n\t\tepoch_to_rewards = {} # epochs x trials\n\t\tnfiles = 0\n\t\tfor i, f in enumerate(filenames):\n\t\t\tif args.max_index and i >= args.max_index:\n\t\t\t\tbreak\n\n\t\t\tf_in = open(os.path.join(results_path, f), 'r')\n\t\t\tmeta = f_in.readline()\n\t\t\treader = csv.reader(f_in)\n\t\t\theaders = next(reader, None)\n\t\t\tif headers != ['episode', 'reward']:\n\t\t\t\traise ValueError('result is malformed')\n\n\t\t\tfor row in reader:\n\t\t\t\trow_dict = dict(zip(headers, row))\n\t\t\t\te = int(row_dict['episode'])\n\t\t\t\trewards = epoch_to_rewards.get(e, [])\n\t\t\t\tr = float(re.findall(r'[-+]?\\d*\\.\\d+|\\d+', row_dict['reward'])[0])\n\t\t\t\trewards.append(r)\n\t\t\t\tif len(rewards) == 1:\n\t\t\t\t\tepoch_to_rewards[e] = rewards\n\n\t\t\tnfiles += 1\n\n\t\tepochs = np.array(sorted([int(k) for k in epoch_to_rewards.keys()]))\n\t\tmax_epoch = max(max([int(k) for k in epoch_to_rewards]), max_epoch)\n\t\tnfiles = min([len(epoch_to_rewards[k]) for k in epoch_to_rewards])\n\t\trewards_ewma = np.zeros((epochs.shape[0], nfiles))\n\t\trewards_avg = np.zeros(len(epochs))\n\t\trewards_std = np.zeros(len(epochs))\n\n\t\tfor i, e in enumerate(epochs):\n\t\t\trewards_ewma[i, :] = epoch_to_rewards[e][:nfiles]\n\n\t\tfor j in range(rewards_ewma.shape[1]):\n\t\t\tdf = pandas.DataFrame(rewards_ewma[:, j])\n\t\t\trewards_ewma[:, j] = np.array(df.ewm(alpha=args.alpha).mean()).squeeze()\n\n\t\tfor e in enumerate(epochs):\n\t\t\trewards_avg = rewards_ewma.mean(1)\n\t\t\trewards_std = rewards_ewma.std(1)\n\n\t\tplt.plot(epochs, rewards_avg, linewidth=2, label=label)\n\t\tplt.fill_between(epochs, rewards_avg 
- rewards_std, \n\t\t\tnp.minimum(rewards_avg + rewards_std, 1), alpha=0.25)\n\t\n\tthreshold_x = np.linspace(0, max_epoch, 2)\n\tplt.plot(threshold_x, np.ones(threshold_x.shape), \n\t\tzorder=1, color='k', linestyle='dashed', linewidth=1, alpha=0.5, label='Oracle')\n\tplt.axis([0, max_epoch, 0.5, 1.01])\n\tplt.legend(loc='best')\n\tplt.ylabel('Normalized R (Optimal)')\n\tplt.xlabel('# Epochs')\n\txtick_values = range(0, max_epoch + 1000, 1000)\n\txtick_labels = ['{}k'.format(int(x/1000)) for x in xtick_values]\n\txtick_labels[0] = ''\n\tplt.xticks(xtick_values, xtick_labels)\n\tax = plt.gca()\n\tplt.show()\n\n","repo_name":"minqi/learning-to-communicate-pytorch","sub_path":"utils/analyze_results.py","file_name":"analyze_results.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"21"} +{"seq_id":"47205422444","text":"# !/usr/bin/env python3\nimport math\n\n\nPLAYER = \"X\"\nAI = \"O\"\nBLANK = \".\"\nWINNINGAMOUNT = 5\n\n\nclass Board():\n def __init__(self, givenBoard):\n self.board = []\n self.winningBoard = False\n self.winner = None\n for y in range(15):\n line = []\n for x in range(15):\n line.append(givenBoard[y][x])\n self.board.append(line)\n\n @classmethod\n def Emptyboard(cls):\n b = cls.__createBoard()\n return Board(b)\n\n @classmethod\n def __createBoard(cls):\n board = []\n for y in range(15):\n line = []\n for x in range(15):\n line.append(BLANK)\n board.append(line)\n return board\n\n def getboard(self):\n return self.board\n\n def getEmpty(self):\n emp = set()\n for y in range(15):\n for x in range(15):\n if self.board[y][x] == BLANK:\n emp.add((x, y))\n return emp\n\n def placeMarker(self, x, y, marker):\n if self.isValidXY(x, y) and self.board[y][x] == BLANK:\n self.board[y][x] = marker\n wonthisround = False\n if(not self.winningBoard):\n self.winningBoard = self.isWinnerboard(x, y, marker)\n wonthisround = True\n if(wonthisround):\n self.winner = marker\n return True\n else:\n return False\n\n def isValidXY(self, x, y):\n return x >= 0 and x < 15 and y >= 0 and y < 15\n\n def getPlacesMarker(self, marker):\n places = []\n for y in range(15):\n for x in range(15):\n if self.board[y][x] != BLANK:\n places.append((x, y))\n return places\n\n def blankPlacesAround(self, x, y):\n places = set()\n for i in range(-1, 2):\n for j in range(-1, 2):\n if self.isValidXY(x+j, y+i) and self.board[y+i][x+j] == BLANK:\n places.add((x+j, y+i))\n return places\n\n def isWinnerboard(self, x, y, marker):\n return self.isRowWin(x, y, marker) or self.isColWin(x, y, marker) or self.isDiagonalWin(x, y, marker)\n\n def isRowWin(self, x, y, marker):\n count = 0\n for i in range(15):\n if self.board[y][i] == marker:\n count += 1\n else:\n count = 0\n if count == 5:\n return True\n return False\n\n def isColWin(self, x, y, marker):\n count = 0\n for i in range(15):\n if self.board[i][x] == marker:\n count += 1\n else:\n count = 0\n if count == 5:\n return True\n return False\n\n def isDiagonalWin(self, x, y, marker):\n count = 0\n for i in range(-5, 5):\n if self.isValidXY(x+i, y+i):\n if self.board[y+i][x+i] == marker:\n count += 1\n else:\n count = 0\n if count == 5:\n return True\n for i in range(-5, 5):\n if self.isValidXY(x+i, y-i):\n if self.board[y-i][x+i] == marker:\n count += 1\n else:\n count = 0\n if count == 5:\n return True\n return False\n\n def evaluate(self, marker):\n enemy = AI\n if marker == AI:\n enemy = PLAYER\n isWin = 0\n if self.winningBoard and self.winner == marker:\n isWin = 100000\n 
elif self.winningBoard and self.winner == enemy:\n isWin = -100000\n off2my = (self.longSegmentsRows(2, marker, enemy) + self.longSegmentsCols(2, marker, enemy) + self.longSegmentsDiag(2, marker, enemy) + self.longSegmentsDiagLeft(2, marker, enemy)) * 2\n off3MY = (self.longSegmentsRows(3, marker, enemy) + self.longSegmentsCols(3, marker, enemy) + self.longSegmentsDiag(3, marker, enemy) + self.longSegmentsDiagLeft(3, marker, enemy)) * 30\n off4MY = (self.longSegmentsRows(4, marker, enemy) + self.longSegmentsCols(4, marker, enemy) + self.longSegmentsDiag(4, marker, enemy) + self.longSegmentsDiagLeft(4, marker, enemy)) * 150\n off2enemy = (self.longSegmentsRows(2, enemy, marker) + self.longSegmentsCols(2, enemy, marker) + self.longSegmentsDiag(2, enemy, marker) + self.longSegmentsDiagLeft(2, enemy, marker)) * 2\n off3enemy = (self.longSegmentsRows(3, enemy, marker) + self.longSegmentsCols(3, enemy, marker) + self.longSegmentsDiag(3, enemy, marker) + self.longSegmentsDiagLeft(3, enemy, marker)) * 30\n off4enemy = (self.longSegmentsRows(4, enemy, marker) + self.longSegmentsCols(4, enemy, marker) + self.longSegmentsDiag(4, enemy, marker) + self.longSegmentsDiagLeft(4, enemy, marker)) * 200\n return off2my + off3MY + off4MY - off2enemy - off3enemy - off4enemy + isWin\n\n def longSegmentsRows(self, lentght, marker, enemy):\n segLen = 0\n count = 0\n currentSegmeny = []\n for y in range(15):\n currentSegmeny = []\n for cel in range(5):\n currentSegmeny.append(self.board[y][cel])\n for x in range(5, 15, 1):\n segLen = 0\n if enemy not in currentSegmeny:\n for markerInCell in currentSegmeny:\n if markerInCell == marker:\n segLen += 1\n if segLen == lentght:\n count += 1\n currentSegmeny.pop(0)\n currentSegmeny.append(self.board[y][x])\n\n return count\n\n def longSegmentsCols(self, lentght, marker, enemy):\n segLen = 0\n count = 0\n currentSegmeny = []\n for x in range(15):\n currentSegmeny = []\n for cel in range(5):\n currentSegmeny.append(self.board[cel][x])\n for y in range(5, 15, 1):\n segLen = 0\n if enemy not in currentSegmeny:\n for markerInCell in currentSegmeny:\n if markerInCell == marker:\n segLen += 1\n if segLen == lentght:\n count += 1\n currentSegmeny.pop(0)\n currentSegmeny.append(self.board[y][x])\n return count\n\n def longSegmentsDiag(self, lenght, marker, enemy):\n segLen = 0\n count = 0\n for base in range(15):\n i = 0\n segLen = 0\n currentSegmeny = []\n for cel in range(5):\n if self.isValidXY(cel, base+cel):\n currentSegmeny.append(self.board[base+cel][cel])\n i = 5\n while self.isValidXY(i, base+i):\n segLen = 0\n if enemy not in currentSegmeny:\n for markerInCell in currentSegmeny:\n if markerInCell == marker:\n segLen += 1\n if segLen == lenght:\n count += 1\n currentSegmeny.pop(0)\n currentSegmeny.append(self.board[base+i][i])\n i += 1\n currentSegmeny = []\n segLen = 0\n i = 0\n if base != 0:\n for cel in range(5):\n if self.isValidXY(base+cel, cel):\n currentSegmeny.append(self.board[cel][cel+base])\n while self.isValidXY(base+i, i):\n segLen = 0\n if enemy not in currentSegmeny:\n for markerInCell in currentSegmeny:\n if markerInCell == marker:\n segLen += 1\n if segLen == lenght:\n count += 1\n currentSegmeny.pop(0)\n currentSegmeny.append(self.board[i][base+i])\n i += 1\n return count\n\n def longSegmentsDiagLeft(self, lenght, marker, enemy):\n segLen = 0\n count = 0\n for base in range(0, 15):\n i = 0\n segLen = 0\n currentSegmeny = []\n for cel in range(5):\n if self.isValidXY(14-cel, base+cel):\n currentSegmeny.append(self.board[14-cel][base+cel])\n i = 5\n while 
self.isValidXY(14-i, base+i):\n segLen = 0\n if enemy not in currentSegmeny:\n for markerInCell in currentSegmeny:\n if markerInCell == marker:\n segLen += 1\n if segLen == lenght:\n count += 1\n currentSegmeny.pop(0)\n currentSegmeny.append(self.board[14-i][base+i])\n i += 1\n i = 0\n segLen = 0\n currentSegmeny = []\n if base != 0:\n for cel in range(5):\n if self.isValidXY(14-base-i, cel):\n currentSegmeny.append(self.board[cel][14-base-i])\n i = 5\n while self.isValidXY(14-base-i, i):\n segLen = 0\n if enemy not in currentSegmeny:\n for markerInCell in currentSegmeny:\n if markerInCell == marker:\n segLen += 1\n if segLen == lenght:\n count += 1\n currentSegmeny.pop(0)\n currentSegmeny.append(self.board[i][14 - base-i])\n i += 1\n return count\n\n def showBoard(self):\n print(\" \", end=\" \")\n for i in range(15):\n print(f\"{chr(65+i):>2} \", end=\"\")\n print(\"\")\n print(\" \",end=' ')\n for i in range(15):\n print(f\"{i:>2} \", end=\"\")\n print()\n for y in range(15):\n print(f'{y:>2}', end='')\n for x in range(15):\n print(f\" |{self.board[y][x]}\", end='')\n print(\"\\n\", end='')\n\n\ndef minimax(borad, depth, alpha, beta, maximanix):\n if(depth == 0 or borad.winningBoard):\n return (borad.evaluate(AI), (0, 0)) # move we return here doesn't matter\n\n playerCels = borad.getPlacesMarker(PLAYER)\n moves = set()\n for cell in playerCels:\n for eCell in borad.blankPlacesAround(cell[0], cell[1]):\n moves.add((eCell[0], eCell[1]))\n\n bestMove = None\n\n if maximanix:\n value = - math.inf\n for move in moves:\n childBorard = Board(borad.getboard())\n childBorard.placeMarker(move[0], move[1], AI)\n value = max(value, minimax(childBorard, depth - 1, alpha, beta, False)[0])\n if value > alpha:\n bestMove = move\n alpha = value\n if alpha >= beta:\n break\n return (alpha, bestMove)\n else:\n value = math.inf\n for move in moves:\n childBorard = Board(borad.getboard())\n childBorard.placeMarker(move[0], move[1], PLAYER)\n value = min(value, minimax(childBorard, depth - 1, alpha, beta, True)[0])\n if value < beta:\n bestMove = move\n beta = value\n if beta <= alpha:\n break\n return (beta, bestMove)\n\n\nclass Game:\n def __init__(self):\n self.curPlayer = AI\n self.board = Board.Emptyboard()\n\n def swichPlayer(self):\n if self.curPlayer == AI:\n self.curPlayer = PLAYER\n else:\n self.curPlayer = AI\n\n def playerTurn(self):\n self.board.showBoard()\n print(\"enter x and y(x,y in numbers)\")\n i = input().split(',')\n x = int(i[0])\n y = int(i[1])\n self.board.placeMarker(x, y, PLAYER)\n\n def takemove(self):\n if self.curPlayer == PLAYER:\n self.playerTurn()\n else:\n self.aiMove()\n\n def aiMove(self):\n minMaxReturn = minimax(self.board, 3, -math.inf, math.inf, True)\n self.board.placeMarker(minMaxReturn[1][0], minMaxReturn[1][1], AI)\n print(f\"ai has done move{minMaxReturn[1]}, value {minMaxReturn[0]}\")\n\n def run(self):\n while(not self.board.winningBoard and len(self.board.getEmpty()) != 0):\n self.swichPlayer()\n self.takemove()\n\n if len(self.board.getEmpty()) == 0:\n print(\"DRAW\")\n elif self.curPlayer == PLAYER:\n print(\"player has won\")\n else:\n print(\"Ai has won\")\n self.board.showBoard()\n\n\nif __name__ == \"__main__\":\n g = Game()\n g.run()","repo_name":"noFrostoo/WSI","sub_path":"zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":12657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2704561280","text":"from tkinter import *\nimport math\n# ---------------------------- CONSTANTS 
------------------------------- #\nGRAY = \"#79888c\"\nRED = \"#f95335\"\nGREEN = \"#2e8b57\"\nYELLOW = \"#f7f5dd\"\nCOFFEE = \"#2b0800\"\nLIGHT_COFFEE = \"#aca099\"\nCOFFEE_TITLE = \"#674a40\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0 # rounds\ntimer = \"None\"\n# ---------------------------- TIMER RESET ------------------------------- #\ndef reset_timer():\n    #resetting all the variables\n    global reps\n    reps = 0\n    window.after_cancel(timer)\n    Start_BTN['state'] = NORMAL\n    Status_LBL.config(text=\"Timer\", foreground=COFFEE)\n    canvas.itemconfig(Timer_LBL, text=\"00:00\")\n    Check_LBL.config(text=\"\")\n\n# ---------------------------- TIMER MECHANISM ------------------------------- #\n\ndef start_timer():\n    global reps\n    Start_BTN['state'] = DISABLED #disable the start button\n    reps += 1\n    work_sec = WORK_MIN * 60\n    short_break_sec = SHORT_BREAK_MIN *60\n    long_break_sec = LONG_BREAK_MIN * 60\n    if reps % 8 == 0:#long break checked first: every eighth rep is also even, so this branch was unreachable below the reps % 2 test\n        count_down(long_break_sec)\n        Status_LBL.config(text=\"Break\",foreground=RED)\n    elif reps % 2 == 0:#when to take a break and when to start work\n        count_down(short_break_sec)\n        Status_LBL.config(text=\"Break\", foreground=GRAY)\n    else:\n        count_down(work_sec)\n        Status_LBL.config(text=\"Work\",foreground=GREEN)\n\n\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #\ndef count_down(count):\n    global timer\n    mins = math.floor(count/60)#rounding the number\n    secs = count % 60\n    if secs < 10:\n        secs = f\"0{secs}\"#adding zeros to display as two digits 00:00\n    if mins < 10:\n        mins = f\"0{mins}\"\n    canvas.itemconfig(Timer_LBL, text=f\"{mins}:{secs}\")\n    if count > 0:#if the time is still running or not\n        timer = window.after(1000,count_down,count - 1)\n    else:\n        start_timer()#calling the function again for a break or work\n        marks = \"\"\n        work_sessions = math.floor(reps/2) # every 2 reps represents 1 round of work and break\n        for i in range(work_sessions):\n            marks+= \"✔\"\n        Check_LBL.config(text=marks)\n\n\n# ---------------------------- UI SETUP ------------------------------- #\n\n#-------window setup--------\n\nwindow = Tk()\nwindow.title(\"Coffee Timer\")\nwindow.config(padx=20,pady=10,bg=YELLOW)\ncanvas = Canvas(width=221,height=300,bg=YELLOW,highlightthickness=0)\nPhoto = PhotoImage(file=\"Coffee1.png\")#background photo\ncanvas.create_image(100,150,image=Photo)\n\ncanvas.grid(column=1,row=1)\n\n#--------buttons setup--------\n\nStart_BTN = Button(text=\"Start\",width=5,background=YELLOW,activebackground=LIGHT_COFFEE,highlightthickness=0,borderwidth=0,command=start_timer)\nStart_BTN.grid(column=0,row=4)\n\nReset_BTN = Button(text=\"Reset\",width=5,background=YELLOW,activebackground=LIGHT_COFFEE,highlightthickness=0,borderwidth=0,command=reset_timer)\nReset_BTN.grid(column=2,row=4)\n\n\n#--------Labels setup--------\n\nStatus_LBL = Label(text=\"Timer\", background=YELLOW, foreground=COFFEE_TITLE, font=(FONT_NAME, 35, \"bold\"))\nStatus_LBL.grid(column=1, row=0)\n\nCheck_LBL = Label(background=YELLOW,foreground=GREEN,font=(FONT_NAME,15,\"normal\"))\nCheck_LBL.grid(column=1,row=3)\n\nTimer_LBL= canvas.create_text(100,200,text=\"00:00\",fill=COFFEE,font=(FONT_NAME,25,\"bold\"))\n\nwindow.mainloop()","repo_name":"SeoBlack/pomodoro","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"13471208441","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as 
np\nimport matplotlib.pyplot as plt\n\n# Create Initial variable for weight value.\ndef init_variable(shape):\n return(tf.Variable(tf.truncated_normal(shape = shape)))\n\n# Fix random variable\nseed = 0\ntf.set_random_seed(seed)\nnp.random.seed(seed)\n\n# Import data\nDF = pd.read_csv(\n filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',\n header=None, sep=',')\nFeatures = DF.ix[:, 0:3].values\nClassStr = DF.ix[:, 4].values\ndf = np.zeros((len(ClassStr), 7))\nfor i in range(len(ClassStr)): # Transform String to Number.\n df[i, 0:4] = Features[i, :]\n if ClassStr[i] == 'Iris-setosa':\n df[i, 4] = 1 # [1, 0, 0]\n elif ClassStr[i] == 'Iris-versicolor':\n df[i, 5] = 1 # [0, 1, 0]\n elif ClassStr[i] == 'Iris-virginica':\n df[i, 6] = 1 # [0, 0, 1]\nnp.random.shuffle(df) # Shuffle row of matrix for test.\n\n# Training Set 70% / Test Set 30%\n# 150 * 0.7 = 105 / 150 * 0.3 = 45\nTraining_Features = df[0:105, 0:4] # Training Set(Features).\nTraining_Classes = df[0:105, 4:7] # Training Set(Classes).\nTest_Features = df[105:, 0:4] # Test Set(Features).\nTest_Classes = df[105:, 4:7] # Test Set(Classes).\n\n# Control panel\nLEARNING_RATE = 0.01\nTRAINING_EPOCHS = 1000\nBATCH_SIZE = 100\n\n# Placeholder variables\nx = tf.placeholder(tf.float32) # Input variable of Features\ny = tf.placeholder(tf.float32) # Input variable of Class\ndropout_prob = tf.placeholder(tf.float32)\n\nNF = 4 # the Number of Features.\nNC = 3 # the Number of Class.\nNN = 100 # the Number of Neuron(Node).\nNUM_LAYER = 10 # the Number of Hidden Layer.\n\n# First Layer\nw0 = init_variable(shape=[NF, NN])/np.sqrt(NF/2)\nb0 = init_variable(shape=[NN])\nlayer = tf.nn.relu(tf.matmul(x, w0) + b0)\nlayer = tf.nn.dropout(layer, dropout_prob)\n\nfor Iter in range(NUM_LAYER):\n w = init_variable(shape=[NN, NN])/np.sqrt(NN/2)\n b = init_variable(shape=[NN])\n layer = tf.nn.relu(tf.matmul(layer, w) + b)\n layer = tf.nn.dropout(layer, dropout_prob)\n\n# Final Layer\nw = init_variable(shape=[NN, NC])/np.sqrt(NN/2)\nb = init_variable(shape=[NC])\nscore = tf.matmul(layer, w) + b\n\n# Optimization method\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=score))\ntrain = tf.train.AdagradOptimizer(LEARNING_RATE).minimize(cost)\n# train = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)\nhypothesis = tf.nn.softmax(score)\n\n# Performance measures\nprediction = tf.argmax(score, 1)\ncorrect_prediction = tf.equal(prediction, tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# Create TensorFlow session\nwith tf.Session() as sess:\n # Initialize variables\n tf.global_variables_initializer().run()\n\n # train\n cost_history = []\n for epoch in range(TRAINING_EPOCHS):\n _, cost_this_batch, hypo = sess.run([train, cost, hypothesis],\n feed_dict={x: Training_Features, y: Training_Classes, dropout_prob: 1})\n cost_history = np.append(cost_history, cost_this_batch)\n\n # Plot\n CostFig = plt.figure()\n plt.plot(range(len(cost_history)), cost_history)\n plt.xlabel('Epoch')\n plt.ylabel('Cost')\n plt.axis([0, TRAINING_EPOCHS, 0, np.max(cost_history)])\n CostFig.savefig('Result_Cost_Drop.png', dpi=100)\n\n print(\"Optimization Finished!\")\n print(\"Accuracy: \", sess.run(accuracy, feed_dict={x: Test_Features, y: Test_Classes, dropout_prob: 
1}))\n","repo_name":"JunyoungJang/Python","sub_path":"ML.ClassifyIrisData/ANN_dropout.py","file_name":"ANN_dropout.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"29999816515","text":"def voitis_nurkademangu(bingo): \n    return bingo[0][0] == 'X' and bingo[0][-1] == 'X' and bingo[-1][0] == 'X' and bingo[-1][-1] == 'X'\n\n \ndef x_peadiagonaalil(bingo):\n    a = 0\n    for i in range(5):\n        if bingo[i][i] == 'X':\n            a += 1\n    return a \n\ndef x_korvaldiagonaalil(bingo):\n    a = 0\n    loendur = 5\n    for i in range(5):\n        loendur -= 1\n        if bingo[i][loendur] == 'X':\n            a += 1\n    return a \n\ndef voitis_diagonaalidemangu(bingo):\n    return x_peadiagonaalil(bingo) == 5 and x_korvaldiagonaalil(bingo) == 5\n\ndef voitis_taismangu(bingo):\n    a = 0\n    for i in bingo:\n        for j in i:\n            if j == 'X':\n                a += 1\n    return a == 25\n","repo_name":"TurboNaator/Prog_alused","sub_path":"2.3_Kas_on_võitnud.py","file_name":"2.3_Kas_on_võitnud.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"31792211931","text":"\"\"\"\nauthor : Lee Sang Min\ngithub : https://github.com/sangm1n\ne-mail : dltkd96als@naver.com\n\ntitle : 완주하지 못한 선수\ndescription : Hash\n\"\"\"\n\nfrom collections import defaultdict\n\n\ndef solution(participant, completion):\n    dic = defaultdict(int)\n    for name in participant:\n        dic[name] += 1\n\n    for name in completion:\n        dic[name] -= 1\n\n    return [key for key, value in dic.items() if value == 1][0]\n\n\nif __name__ == '__main__':\n    participant = [\"leo\", \"kiki\", \"eden\"]\n    completion = [\"eden\", \"kiki\"]\n\n    result = solution(participant, completion)\n    print(result)\n","repo_name":"sangm1n/problem-solving","sub_path":"Programmers/완주하지 못한 선수.py","file_name":"완주하지 못한 선수.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10538497813","text":"# A Simple Implementation of Push Down Automaton:\n## Defining the 7 tuples:\n#> * Q-->finite set of states\n#> * Sigma-->finite set of input alphabet\n#> * Gamma-->finite set of stack alphabet\n#> * Delta-->transition relation\n#> * Q0-->start state\n#> * Z-->initial stack symbol\n#> * F-->set of accepting states\n\n# Defining the states:\n\n#* state 0: starting state\n\n#* state 1:From state 0 whenever it sees a 1 or 2, it moves to state 1+pushes the element onto the stack\n#* state 1:From state 1 whenever it sees a 1 or 2, it remains in state 1+pushes the element onto the stack\n\n#* state 2:From state 1 whenever it sees a 0, it moves to state 2+pops from the stack\n#* state 2:From state 2 whenever it sees a 0, it remains in state 2+pops from the stack\n\n#* state 3:From state 0, if it sees a 0,it moves to state 3,the rejected state\n#* state 3:From state 2, if it sees a 1 or 2 , it moves to state 3, the rejected state\n#* state 3:If at the end, the stack is not empty, it moves to state 3,the rejected state\n\n#stack functions\ndef push(a,list1):\n    #pushing to the stack/adding to the top of the stack\n    list1.append(a)\n    return 1\n\ndef pop(list1):\n    #for popping from the stack/removing the top element of the stack\n    index=len(list1)-1\n    if (index>0):\n        list1.pop(index)\n        return 1\n    else:\n        return 0\n\n# Q={0,1,2,3}\n# Sigma={0,1,2}\n# Starting state={0}\n# Z=#\n# F={2}\n\n#setting the initial stack symbol\nstack=['#']\n#setting the starting state\nstate=0\n\n#taking the 
input\ninput_string=input('Enter the String:')\n\n#performing the operations\nl=len(input_string)\ni=0\nif l%2==0:\n while i= 70\")\n if capability[0] < 8 and (ADTYPE == \"bfloat16\" or BDTYPE == \"bfloat16\"):\n pytest.skip(\"Only test bfloat16 on devices with sm >= 80\")\n if (ADTYPE == \"bfloat16\" or BDTYPE == \"bfloat16\") and SPLIT_K != 1:\n pytest.skip(\"bfloat16 matmuls don't allow split_k for now\")\n torch.manual_seed(0)\n # nuke kernel decorators -- will set meta-parameters manually\n kwargs = {'BLOCK_M': BLOCK_M, 'BLOCK_N': BLOCK_N, 'BLOCK_K': BLOCK_K, 'SPLIT_K': SPLIT_K}\n pre_hook = None if SPLIT_K == 1 else lambda nargs: nargs['C'].zero_()\n configs = [triton.Config(kwargs=kwargs, num_warps=NWARP, num_stages=NSTAGE, pre_hook=pre_hook)]\n kernel = triton.ops._matmul.kernel\n kernel.configs = configs\n # kernel.run = kernel.run.run.run\n\n # get matrix shape\n M = BLOCK_M if M is None else M\n N = BLOCK_N if N is None else N\n K = BLOCK_K * SPLIT_K if K is None else K\n\n def get_input(n, m, t, dtype):\n if t:\n return get_input(m, n, False, dtype).t()\n if dtype == \"float8\":\n x = torch.randint(10, 50, (n, m), device=\"cuda\", dtype=torch.int8)\n return f8_to_f16(x)\n dtype = {\"float16\": torch.float16, \"bfloat16\": torch.bfloat16, \"float32\": torch.float32}[dtype]\n return .1 * torch.randn((n, m), device=\"cuda\", dtype=dtype)\n # allocate/transpose inputs\n a = get_input(M, K, AT, ADTYPE)\n b = get_input(K, N, BT, BDTYPE)\n # run test\n th_c = torch.matmul(a, b)\n try:\n tt_c = triton.ops.matmul(a, b)\n torch.testing.assert_allclose(th_c, tt_c, atol=1e-2, rtol=0)\n except triton.OutOfResources as e:\n pytest.skip(str(e))\n","repo_name":"warpalphap/OpenAI-Triton","sub_path":"python/test/unit/operators/test_matmul.py","file_name":"test_matmul.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73791838131","text":"\"\"\"\n@author: Li Xi\n@file: GMN.py\n@time: 2019-05-12 16:29\n@desc:\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nfrom typing import Dict\n\nimport gensim\nfrom allennlp.training import Trainer\n\nfrom models.layers.attention import Attention\nfrom models.layers.decoder import Score\nfrom models.layers.encoder import EventEmbedding\n\nsys.path.append('..')\n\nimport numpy as np\nimport torch\nfrom allennlp.common.util import ensure_list\nfrom allennlp.data import Vocabulary\nfrom allennlp.data.iterators import BucketIterator\nfrom allennlp.models.model import Model\nfrom allennlp.modules import TextFieldEmbedder\nfrom allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\nfrom allennlp.training.metrics import BooleanAccuracy, F1Measure\nfrom overrides import overrides\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import Embedding, Tanh, Sigmoid, BCELoss\n\nfrom models.event_reader import EventDataReader\n\n\n@Model.register(\"GMN\")\nclass GMN(Model):\n\n def __init__(self,\n args,\n word_embeddings: TextFieldEmbedder,\n vocab: Vocabulary) -> None:\n super().__init__(vocab)\n\n # parameters\n self.args = args\n self.word_embeddings = word_embeddings\n\n # gate\n self.W_z = nn.Linear(self.args.embedding_size, 1, bias=False)\n self.U_z = nn.Linear(self.args.embedding_size, 1, bias=False)\n self.W_r = nn.Linear(self.args.embedding_size, 1, bias=False)\n self.U_r = nn.Linear(self.args.embedding_size, 1, bias=False)\n self.W = nn.Linear(self.args.embedding_size, 1, bias=False)\n self.U = nn.Linear(self.args.embedding_size, 1, 
bias=False)\n\n # layers\n self.event_embedding = EventEmbedding(args, self.word_embeddings)\n self.attention = Attention(self.args.embedding_size, score_function='mlp')\n self.sigmoid = Sigmoid()\n self.tanh = Tanh()\n self.score = Score(self.args.embedding_size, self.args.embedding_size, threshold=self.args.threshold)\n\n # metrics\n self.accuracy = BooleanAccuracy()\n self.f1_score = F1Measure(positive_label=1)\n self.loss_function = BCELoss()\n\n def gated_atten(self, vt_1, atten_input):\n \"\"\"\n gated attention block\n :param vt_1: v_t-1\n :param atten_input: [h1, h2, ... ,h_n-1]\n :return: v_t\n \"\"\"\n # [batch_size, 1, embedding_size]\n out_at, _ = self.attention(atten_input, vt_1)\n # [batch_size, embedding_size]\n h_e = torch.sum(out_at * atten_input, dim=1)\n # [batch_size, 1]\n z = (self.sigmoid(self.W_z(h_e.unsqueeze(1)) + self.U_z(vt_1))).squeeze(1)\n # [batch_size, 1]\n r = (self.sigmoid(self.W_r(h_e.unsqueeze(1)) + self.U_r(vt_1))).squeeze(1)\n # [batch_size, 1]\n h = self.tanh(self.W(h_e.unsqueeze(1)) + self.U((torch.mul(r, vt_1.squeeze(1))).unsqueeze(1))).squeeze(1)\n # [baych_size, 1, embedding_size]\n vt = (torch.mul((1 - z), vt_1.squeeze(1)) + torch.mul(z, h)).unsqueeze(1)\n\n return vt\n\n @overrides\n def forward(self,\n trigger_0: Dict[str, torch.LongTensor],\n trigger_agent_0: Dict[str, torch.LongTensor],\n agent_attri_0: Dict[str, torch.LongTensor],\n trigger_object_0: Dict[str, torch.LongTensor],\n object_attri_0: Dict[str, torch.LongTensor],\n trigger_1: Dict[str, torch.LongTensor],\n trigger_agent_1: Dict[str, torch.LongTensor],\n agent_attri_1: Dict[str, torch.LongTensor],\n trigger_object_1: Dict[str, torch.LongTensor],\n object_attri_1: Dict[str, torch.LongTensor],\n trigger_2: Dict[str, torch.LongTensor],\n trigger_agent_2: Dict[str, torch.LongTensor],\n agent_attri_2: Dict[str, torch.LongTensor],\n trigger_object_2: Dict[str, torch.LongTensor],\n object_attri_2: Dict[str, torch.LongTensor],\n trigger_3: Dict[str, torch.LongTensor],\n trigger_agent_3: Dict[str, torch.LongTensor],\n agent_attri_3: Dict[str, torch.LongTensor],\n trigger_object_3: Dict[str, torch.LongTensor],\n object_attri_3: Dict[str, torch.LongTensor],\n trigger_4: Dict[str, torch.LongTensor],\n trigger_agent_4: Dict[str, torch.LongTensor],\n agent_attri_4: Dict[str, torch.LongTensor],\n trigger_object_4: Dict[str, torch.LongTensor],\n object_attri_4: Dict[str, torch.LongTensor],\n event_type: Dict[str, torch.LongTensor],\n label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:\n\n # tri, e: [batch_size, 1, embedding_size]\n tri0, e0 = self.event_embedding(trigger_0, trigger_agent_0, trigger_object_0)\n tri1, e1 = self.event_embedding(trigger_1, trigger_agent_1, trigger_object_1)\n tri2, e2 = self.event_embedding(trigger_2, trigger_agent_2, trigger_object_2)\n tri3, e3 = self.event_embedding(trigger_3, trigger_agent_3, trigger_object_3)\n tri4, e4 = self.event_embedding(trigger_4, trigger_agent_4, trigger_object_4)\n\n # [batch_size, seq_Len, embedding_size]\n e = (torch.stack([e0, e1, e2, e3, e4], dim=1)).squeeze(2)\n\n # [batch_size, 1, embedding_size]\n vt = tri4\n\n for i in range(self.args.hop_num):\n # [batch_size, 1, embedding_size]\n vt = self.gated_atten(vt, e)\n\n # [batch_size, embedding_size]\n x = vt.view(vt.size(0), -1)\n # [batch_size, 1] , [batch_size], [batch_size, label_size]\n score, logits, logits_f1 = self.score(x, tri4)\n\n output = {\"logits\": logits,\n \"score\": score}\n if label is not None:\n self.accuracy(logits, label)\n self.f1_score(logits_f1, 
label)\n output[\"loss\"] = self.loss_function(score.squeeze(1), label.float())\n\n return output\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n accuracy = self.accuracy.get_metric(reset)\n precision, recall, f1_measure = self.f1_score.get_metric(reset)\n return {\n \"accuracy\": accuracy,\n \"precision\": precision,\n \"recall\": recall,\n \"f1_measure\": f1_measure\n }\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n args = parser.parse_args()\n\n args.embedding_size = 300\n args.learning_rate = 1e-4\n args.batch_size = 1\n args.epochs = 2\n args.patience = 1\n args.cuda_device = -1\n args.hidden_size = 8\n args.hop_num = 6\n args.label_num = 2\n args.threshold = 0.5\n\n # load data\n reader = EventDataReader()\n train_dataset = ensure_list(reader.read(os.path.join('..', 'dataset', 'example', 'train.data')))\n eval_dataset = ensure_list(reader.read(os.path.join('..', 'dataset', 'example', 'train.data')))\n test_dataset = ensure_list(reader.read(os.path.join('..', 'dataset', 'example', 'train.data')))\n\n # get vocabulary and embedding\n vocab = Vocabulary.from_instances(train_dataset + eval_dataset + test_dataset,\n min_count={\"trigger_0\": 0,\n \"trigger_agent_0\": 0,\n \"agent_attri_0\": 0,\n \"trigger_object_0\": 0,\n \"object_attri_0\": 0,\n \"trigger_1\": 0,\n \"trigger_agent_1\": 0,\n \"agent_attri_1\": 0,\n \"trigger_object_1\": 0,\n \"object_attri_1\": 0,\n \"trigger_2\": 0,\n \"trigger_agent_2\": 0,\n \"agent_attri_2\": 0,\n \"trigger_object_2\": 0,\n \"object_attri_2\": 0,\n \"trigger_3\": 0,\n \"trigger_agent_3\": 0,\n \"agent_attri_3\": 0,\n \"trigger_object_3\": 0,\n \"object_attri_3\": 0,\n \"trigger_4\": 0,\n \"trigger_agent_4\": 0,\n \"agent_attri_4\": 0,\n \"trigger_object_4\": 0,\n \"object_attri_4\": 0})\n\n # load pre-trained word vector\n word_vector_path = os.path.join('..', 'dataset', 'sgns.event')\n word_vector = gensim.models.KeyedVectors.load_word2vec_format(word_vector_path)\n pretrained_weight = np.array([[0.00] * args.embedding_size] * vocab.get_vocab_size())\n\n for i in range(vocab.get_vocab_size()):\n word = vocab.get_token_from_index(i, 'tokens')\n if word in word_vector.vocab:\n pretrained_weight[vocab.get_token_index(word)] = word_vector[word]\n del word_vector\n\n token_embedding = Embedding(num_embeddings=vocab.get_vocab_size('tokens'),\n embedding_dim=args.embedding_size, _weight=torch.from_numpy(pretrained_weight).float())\n word_embeddings = BasicTextFieldEmbedder({\"tokens\": token_embedding})\n\n model = GMN(args, word_embeddings, vocab)\n optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=1e-5)\n iterator = BucketIterator(batch_size=args.batch_size, sorting_keys=[(\"trigger_0\", \"num_tokens\")])\n iterator.index_with(vocab)\n\n trainer = Trainer(model=model,\n optimizer=optimizer,\n iterator=iterator,\n train_dataset=train_dataset,\n validation_dataset=eval_dataset,\n num_epochs=args.epochs,\n patience=args.patience, # stop training before loss raise\n cuda_device=args.cuda_device, # cuda device id\n )\n\n # start train\n metrics = trainer.train()\n","repo_name":"RingBDStack/KGEvetPred","sub_path":"models/GMN.py","file_name":"GMN.py","file_ext":"py","file_size_in_byte":10475,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"32411202546","text":"num=int(input('digite um numero:'))\nprint('''esolha uma das opcçoes\n[1] converter para binário\n[2] converter para octal\n[3] converter para 
hexadecimal''')\nopção=int(input('qual opção escolhida:'))\nif opção == 1:\n print('a conversao de {} é {}'.format(num,bin(num)))\nelif opção == 2:\n print('a conversao de {} é {}'.format(num,oct(num)))\nelif opção == 3:\n print('{} convertido é {}'.format(num,hex(num)))\nelse:\n print(' tente novamente ')\n\n","repo_name":"Brunoempke/Python-exercicios","sub_path":"ex037.py","file_name":"ex037.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35216234927","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Page',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('added_at', models.DateTimeField()),\n ('deleted', models.BooleanField(default=False)),\n ('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('added_at', models.DateTimeField()),\n ('name', models.CharField(max_length=255)),\n ('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Version',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('added_at', models.DateTimeField()),\n ('title', models.CharField(max_length=255)),\n ('content', models.TextField()),\n ('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('page', models.ForeignKey(to='docs.Page')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='page',\n name='project',\n field=models.ForeignKey(to='docs.Project'),\n preserve_default=True,\n ),\n ]\n","repo_name":"deanrock/docs-app","sub_path":"docs/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12629173793","text":"\"\"\"\ndefine the url routes of core api\n\"\"\"\n\nfrom django.urls import path\n\nfrom core.api.auth import login, register, getUserInfo, modifyUserInfo, modifyUserPassword\nfrom core.api.bill_acquire import create_bill, query_bill, modify_bill, delete_bill, query_all_bill\nfrom core.api.card_management import create_card, query_card, modify_card, delete_card\nfrom core.api.bill_statistics import list_capital_flow, list_highest_bill, list_total_capital_flow\nfrom core.api.user_management import (add_following_company,\n list_normal_user_information,\n list_company_information,\n list_all_company_info,\n list_suggest_company_info,\n get_user_name,\n get_company_status,\n get_user_status)\nfrom core.api.article_management import (list_company_article,\n list_spec_company_article,\n require_article_content,\n create_company_article,\n modify_company_article,\n delete_company_article)\nfrom core.api.comment_ctrl import (create_comment,\n query_comment,\n delete_comment)\n\n\nurlpatterns = [\n path('bill/create', create_bill),\n path('bill/query', query_bill),\n path('bill/query-reversed', 
query_all_bill),\n path('bill/modify', modify_bill),\n path('bill/delete', delete_bill),\n path('bill/check_cost', list_capital_flow),\n path('bill/check_high', list_highest_bill),\n path('bill/check_allyear', list_total_capital_flow),\n path('card/create', create_card),\n path('card/query', query_card),\n path('card/modify', modify_card),\n path('card/delete', delete_card),\n path('user/add_following_company', add_following_company),\n path('user/check_self', list_normal_user_information),\n path('user/check_self_company', list_company_information),\n path('user/list_company', list_all_company_info),\n path('user/list_suggest_company', list_suggest_company_info),\n path('user/name', get_user_name),\n path('user/company_status', get_company_status),\n path('user/user_status', get_user_status),\n path('article/list_article', list_company_article),\n path('article/list_spec_article', list_spec_company_article),\n path('article/require_article', require_article_content),\n path('article/create_article', create_company_article),\n path('article/modify_article', modify_company_article),\n path('article/delete_article', delete_company_article),\n path('comment/create', create_comment),\n path('comment/query', query_comment),\n path('comment/delete', delete_comment),\n path('token-auth', login),\n path('register', register),\n path('user/info', getUserInfo),\n path('user/info_modify', modifyUserInfo),\n path('user/password_modify', modifyUserPassword),\n]\n","repo_name":"CPUmaker/finance-mng-app","sub_path":"DB-backend/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21419803363","text":"import unittest\nimport os\nimport logging\n\nfrom image_searcher.search.search import Search\n\n\nclass TestSearch(unittest.TestCase):\n def setUp(self):\n logging.basicConfig(filename=None, level=logging.INFO)\n self.image_dir_path = \"./tests/test_data\"\n self.tearDown()\n\n def tearDown(self) -> None:\n embed_path = os.path.join(self.image_dir_path, \"stored_embeddings.pickle\")\n if os.path.isfile(embed_path):\n logging.info(f\"Removing {embed_path}\")\n os.remove(embed_path)\n\n def test_searcher(self):\n self.searcher = Search(image_dir_path=self.image_dir_path, include_faces=True)\n ranked_images = self.searcher.rank_images(\"A photo of a fast vehicle.\")\n self.assertIsInstance(ranked_images, list)\n\n self.searcher = Search(image_dir_path=None, save_path=self.image_dir_path, include_faces=True)\n ranked_images = self.searcher.rank_images(\"A photo of a fast vehicle.\")\n self.assertIsInstance(ranked_images, list)\n print(ranked_images)\n\n def test_face_searcher(self):\n self.searcher = Search(image_dir_path=self.image_dir_path, include_faces=True)\n ranked_images = self.searcher.rank_images_by_faces(image_path=os.path.join(self.image_dir_path, \"profile.jpg\"))\n self.assertIsInstance(ranked_images, list)\n\n self.searcher = Search(image_dir_path=None, save_path=self.image_dir_path, include_faces=True)\n ranked_images = self.searcher.rank_images_by_faces(image_path=os.path.join(self.image_dir_path, \"friends.jpg\"))\n self.assertIsInstance(ranked_images, list)\n print(ranked_images)\n\n","repo_name":"ManuelFay/ImageSearcher","sub_path":"tests/test_search/test_search_faces.py","file_name":"test_search_faces.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"21"} 
+{"seq_id":"43510707265","text":"#!/usr/bin/env python3\n\n\"\"\"\nProgram written by Mattias Kockum\nOn the 15/7/2020\nThe aim of this program is to create an AI\nThe training is parallelized\n\"\"\"\n\n# Necessary\nfrom AI import *\nimport pyopencl as cl\nfrom GPU_code_maker import *\n# Useful for compiling Network in machine code\nimport os\n# Useful for easy data visualisation\nimport matplotlib.pyplot as plt\n\n# parallelization functions\n\ndef extend(array, n=1):\n r = []\n for i in array:\n for j in range(n):\n r.append(copy.deepcopy(i))\n return(r)\n\ndef mean(array, n=1):\n r = []\n array += [0]*(-len(array)%n)\n for i in range(0, len(array), n):\n r.append(sum(array[i:i+n])/n)\n return(r)\n\n# Members are put in form of a list [biais...weights...]\n# (size = nb_neurons*(nb_neurons + 1))\n\ndef prob_reproduction(X):\n \"\"\"\n A weird looking function for parallelization\n X[0] is a group of members\n X[1] is their respective probability of being copied\n X[2] = mutation_coefficent\n X[3] = mutation_amplitude\n returns the mutation of the chosen one\n \"\"\"\n return(np.random.choice(X[0], p=X[1]).mutate(X[2], X[3]))\n\ndef evaluate(X):\n \"\"\"\n Another weird looking function\n X[0] is a problem\n X[1] is a network\n returns the score of the network\n \"\"\"\n np.random.seed()\n X[0].reset()\n X[1].reset()\n return_value = X[0].experience(X[1])\n return(return_value)\n\ndef pooled_evolution(X):\n \"\"\"\n Another one\n \"\"\"\n r = X[0].evolve(X[1], X[2])\n return(r, (X[0].members[0], r))\n\n\nclass GPU_Problem(Problem):\n \"\"\"\n The frame of any \"live\" problem, but in GPU\n \"\"\"\n def Kernel_code(self):\n return(\"This should be some kernel code\")\n\n\nclass GPU_Herd(Herd):\n \"\"\"\n Herd of networks that evolve by reproducing\n \"\"\"\n def make_members(self, kwargs):\n self.members = [\n GPU_Network(self.nb_sensors, self.nb_actors, self.nb_add_neurons,\n self.period, self.function, self.reset_after_process,\n **kwargs)\n for i in range(self.size)\n ]\n\n def evolve(self, problem, nb_generations=1):\n \"\"\"\n The idea is to make the AI evolve by aproximating the gradient descent\n Opens and closes the score output file multiple times so that it's\n possible to see what's going on in during the training\n \"\"\"\n # Saving problem\n if problem == None:\n # The empty problem, just here for quick tests\n self.Problem = GPU_Problem()\n else:\n self.Problem = problem\n # Paralellization values\n platform = cl.get_platforms()[0]\n device = platform.get_devices()[0]\n context = cl.Context([device])\n queue = cl.CommandQueue(context)\n mf = cl.mem_flags\n definitions = defines(self.nb_sensors, self.nb_actors,\n self.nb_add_neurons, self.period,\n self.function.__name__)\n AI_code = C_to_string(\"Kernel_AI.c\")\n code = definitions + AI_code + self.Problem.Kernel_code()\n kernel_program = cl.Program(context, code).build()\n # Opening score file\n score_file = open(self.Problem.__name__() + \"_score\" + self.date, \"w\")\n score_file.write(\n \"score\\n\"\n + \"number of added neurons : {}\\n\".format(self.nb_add_neurons)\n + \"period : {}\\n\".format(self.period)\n + \"size : {}\\n\".format(self.size)\n + \"mutation coefficent : {}\\n\".format(self.mutation_coefficent)\n + \"mutation_amplitude : {}\\n\".format(self.mutation_amplitude)\n + \"number of tests : {}\\n\".format(self.nb_tests)\n + \"number of generations to proceed : {}\\n\".format(nb_generations)\n )\n score_file.close()\n # Display instruction\n \"\"\"\n for pb in self.Problem_pool:\n pb.do_display = False\n if 
self.do_display:\n self.Problem_pool[0].do_display = True\n \"\"\"\n # Evolution\n for generation in range(nb_generations):\n # Initialization of values and buffers\n Problem_inputs = self.Problem.Kernel_inputs(self.size*self.nb_tests)\n Network_inputs = []\n for member in self.members:\n Network_values = []\n for value in member.flatten():\n Network_values.append(value)\n Network_inputs.append(Network_values)\n Kernel_inputs = []\n for i in range(self.nb_tests*self.size):\n Kernel_input = []\n Kernel_input += Problem_inputs[i]\n # each Network appears nb_tests times in a row, matching mean() below\n Kernel_input += Network_inputs[i//self.nb_tests]\n Kernel_inputs.append(Kernel_input)\n Kernel_inputs_buffer = cl.Buffer(context,\n mf.READ_ONLY | mf.COPY_HOST_PTR,\n hostbuf=np.array(Kernel_inputs))\n score = np.zeros((self.size*self.nb_tests, ))\n score_buffer = cl.Buffer(context, mf.WRITE_ONLY, score.nbytes)\n # Evaluation of performances, one work-item per (Network, test) pair\n kernel_program.experience(\n queue, score.shape, None,\n Kernel_inputs_buffer, score_buffer)\n # Read the scores back from the device; assumes the kernel wrote\n # one score per (Network, test) pair into score_buffer\n cl.enqueue_copy(queue, score, score_buffer)\n # Average each Network's score over its nb_tests runs\n self.score = mean(list(score), self.nb_tests)\n # Reproduction (with mutation) of Networks\n self.reproduce(self.modif_score(self.score))\n # Saves the scores\n self.max_score = max(self.score)\n self.max_score_index = self.score.index(self.max_score)\n self.array_scores.append(self.max_score)\n # Saves one Network and the score evolution\n self.members[self.max_score_index].save(\n self.Problem.__name__() + \"_Network\" + self.date, \"w\", False)\n score_file = open(\n self.Problem.__name__() + \"_score\" + self.date, \"a\"\n )\n score_file.write(\n \"generation n° {} : {} \\n\".format(\n generation, str(self.max_score)))\n score_file.close()\n score_file = open(self.Problem.__name__() + \"_score\" + self.date, \"a\")\n score_file.write(\"End\\n\")\n score_file.close()\n return(self.array_scores)\n\n def reproduce(self, proba_reproduction):\n \"\"\"\n The copy of the successful networks with mutation\n parallelized\n \"\"\"\n pool = mp.Pool()\n new_members = (\n pool.map(\n prob_reproduction,\n [(\n self.members,\n proba_reproduction,\n self.mutation_coefficent,\n self.mutation_amplitude\n )]*self.size\n )\n )\n self.members = new_members\n pool.close()\n\n def modif_score(self, score):\n \"\"\"\n Modifies the scores to make them useable in probability\n \"\"\"\n # I put the np.array in case the score isn't an array\n score = np.array(score)\n score = score - min(score)\n if list(score) == list(np.zeros(self.size)):\n # if every Network has a score of zero they reproduce with equal\n # probability\n score = np.ones(self.size)\n return(score/sum(score))\n\nclass GPU_Network(Network):\n \"\"\"\n A neural network, but non directional\n it has input, output, and hidden neurons\n and can process the input multiple times, sometimes making it pass through\n the same neurons more than once.\n I expect it to make them faster, smaller, and capable of making faster\n \"life or death\" decisions given the fact that the input neurons are in\n direct contact with the output neurons (and some other neurons make a short\n cut too), so I believe the Network will have both fast and slow thinking.\n This is how the neurons are placed\n [\n input : [ slowest thinking]\n [ ]\n [ ]\n [fastest thinking ]\n output\n ]\n \"\"\"\n def flatten(self):\n return(list(self.bias)\n + list(self.weights.reshape((self.nb_neurons**2,))))\n\n\nclass TestBench(object):\n \"\"\"\n A test bench to verify everything works fine\n \"\"\"\n def __init__(\n self,\n problem,\n nb_herds = 1,\n nb_generations = 20,\n nb_add_neurons = 9,\n period = 1,\n function = segments,\n reset_after_process = True,\n size = 100,\n 

mutation_coefficent = 0.0001,\n mutation_amplitude = 0.01,\n nb_tests = 2,\n do_display_execution = False,\n display_results_mode = None,\n **kwargs\n ):\n self.kwargs = kwargs\n if \"slices\" in kwargs:\n self.nb_add_neurons = sum(kwargs[\"slices\"][1:-1])\n else:\n self.nb_add_neurons = nb_add_neurons\n self.series = []\n self.problem = problem\n self.nb_sensors = problem.nb_sensors\n self.nb_actors = problem.nb_actors\n self.period = period\n # keep these constructor arguments so that test() can reuse them\n self.function = function\n self.reset_after_process = reset_after_process\n self.colors = [\"r\", \"g\", \"b\", \"c\", \"m\", \"y\", \"k\"]\n self.nb_herds = nb_herds\n self.nb_generations = nb_generations\n self.size = size\n self.mutation_coefficent = mutation_coefficent\n self.mutation_amplitude = mutation_amplitude\n self.nb_tests = nb_tests\n self.do_display_execution = do_display_execution\n self.display_results_mode = display_results_mode\n self.values_simple = self.nb_herds*[1]\n self.values_nb_add_neurons = [0, 1, 2, 3, 4, 5, 6]\n self.values_period = [1, 2, 3, 4, 5, 6, 7]\n self.values_sizes = [5, 10, 50, 100, 500, 1000]\n self.values_mutation_coefficients = [0.0001, 0.000005, 0.00001]\n self.values_mutation_amplitude = [0.01, 0.005, 0.001]\n self.values_nb_tests = [2, 4, 8, 16, 32, 64, 128, 256, 512]\n self.archives = []\n\n def test(self, mode = \"simple\", nb_generations = None, values = None):\n if nb_generations == None:\n nb_generations = self.nb_generations\n base = [\n self.nb_sensors,\n self.nb_actors,\n self.nb_add_neurons,\n self.period,\n self.function,\n self.reset_after_process,\n self.size,\n self.mutation_coefficent,\n self.mutation_amplitude,\n self.nb_tests,\n self.do_display_execution\n ]\n if mode in [0, \"simple\"]:\n if values == None:\n values = self.values_simple\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n elif mode in [1, \"nb_add_neurons\"]:\n if values == None:\n values = self.values_nb_add_neurons\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs[:,2] = values\n elif mode in [2, \"period\"]:\n if values == None:\n values = self.values_period\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs[:,3] = values\n elif mode in [3, \"size\"]:\n if values == None:\n values = self.values_sizes\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs[:,6] = values\n elif mode in [4, \"coefficient_mutation\"]:\n if values == None:\n values = self.values_mutation_coefficients\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs[:,7] = values\n elif mode in [5, \"coefficient_amplitude\"]:\n if values == None:\n values = self.values_mutation_amplitude\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs[:,8] = values\n elif mode in [6, \"nb_tests\"]:\n if values == None:\n values = self.values_nb_tests\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs[:,9] = values\n elif mode in [7, \"multiple\"]:\n if values == None:\n raise(ValueError(\"An array must be in input\"))\n array_inputs = np.array(\n [base for i in range(len(values))],\n dtype = object\n )\n array_inputs = values\n # Pre-display\n test_colors = np.array([[self.colors[i%len(self.colors)]]\n for i in range(len(values))])\n test_values = np.concatenate((array_inputs[:,2:-1], test_colors),\n axis = 1)\n self.display_table(test_values)\n # Starts learning !\n for i in range(len(values)):\n H = Herd(*array_inputs[i], 

**self.kwargs)\n self.series.append(H.evolve(self.problem, nb_generations))\n self.archives.append([H.members[0], self.series])\n # display\n if self.display_results_mode != None:\n if self.display_results_mode in [0, \"console\"]:\n self.display_console()\n if self.display_results_mode in [1, \"plot\"]:\n self.display_plot()\n # reset the self.series for if it is needed after\n self.series = []\n\n def display_table(self, Variables_values):\n Variables_name_1 = np.array([\"nb of added\", \"\",\n \"herd's\", \"mutation\",\n \"mutation\", \"nb of\", \"\"])\n Variables_name_2 = np.array([\"neurons\", \"period\",\n \"size\", \"coefficent\", \"amplitude\",\n \"tests\", \"color\"])\n form =\"{:<12} {:<7} {:<7} {:<11} {:<11} {:<7} {:<0}\"\n for i in [Variables_name_1, Variables_name_2, *Variables_values]:\n print(form.format(*i))\n\n def display_console(self, archive = False):\n if archive:\n display_series = [i[1][0] for i in self.archives]\n else:\n display_series = self.series\n for indice, serie in enumerate(display_series):\n color = self.colors[indice%len(self.colors)]\n print(\"-- serie n°: {} -- color : {} --\\n\".format(indice, color))\n for i in serie:\n print(\" {}\".format(i))\n\n def display_plot(self, archive = False):\n if archive:\n display_series = [i[1][0] for i in self.archives]\n else:\n display_series = self.series\n for indice, serie in enumerate(display_series):\n color = self.colors[indice%len(self.colors)]\n plt.plot(\n [k for k in range(len(serie))],\n serie,\n color+\"-*\"\n )\n plt.show()\n","repo_name":"MattiasKockum/NDNN","sub_path":"GPU/GPU_AI.py","file_name":"GPU_AI.py","file_ext":"py","file_size_in_byte":15139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18207343402","text":"class Solution:\n def killProcess(self, pid: List[int], ppid: List[int], kill: int) -> List[int]:\n ans = []\n tree = collections.defaultdict(list)\n\n for v, u in zip(pid, ppid):\n if u == 0:\n continue\n tree[u].append(v)\n\n def dfs(u: int) -> None:\n ans.append(u)\n for v in tree.get(u, []):\n dfs(v)\n\n dfs(kill)\n return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0582. 
Kill Process/0582.py","file_name":"0582.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"3780265381","text":"\"\"\"\r\nfile = open('18_Test.txt', mode = 'r')\r\n\r\nline = file.readline()\r\n# readline() will give only the first line of file\r\n#(file.readlines())\r\n#whereas readlines() gives an array of lines in the text file\r\nprint(line)\r\n\r\nfile.close()\r\n\"\"\"\r\n# Now open files using with open()\r\n# with open() is better at exception handelling\r\nwith open('18_Test.txt', 'r') as file:\r\n data = file.readline()\r\n print(data)\r\n\r\n\r\n","repo_name":"ap4ashutosh/Meta_backend_development_python_course","sub_path":"19_file_handelling.py","file_name":"19_file_handelling.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34092702714","text":"\"\"\"\nPlot 1D entropy in the time or frequency domain\n\"\"\"\n\nfrom typing import Optional\nimport numpy as np\nfrom scipy.signal import welch\nfrom scipy.fft import rfft, rfftfreq\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nfrom quantum_inferno import info\n\n\ndef plot_fft_compare(sig_in,\n fs_hz: float,\n duration_no_pad_points: int,\n welch_points: int,\n overlap_points: int,\n fmin_hz: float = 2E-5,\n fmax_hz: float = 1E-2,\n station_str: Optional[str] = None,\n do_show: bool = True,\n do_save: bool = False,\n save_path: str = None):\n \"\"\"\n Comparisons for time and fft isnr and esnr\n :param sig_in:\n :param fs_hz:\n :param duration_no_pad_points:\n :param welch_points:\n :param overlap_points:\n :param fmin_hz: Plotting min frequency limit\n :param fmax_hz: Plotting max frequency limit\n :param station_str: Station ID\n :param do_show:\n :param do_save:\n :param save_path:\n :return:\n \"\"\"\n\n time_idx = duration_no_pad_points\n # TODO: Clean up, this is silly\n # TODO: Build the distribution only from the portion of the spectrum that was used\n sig_in_no_pad = sig_in[0:time_idx]\n time = np.arange(len(sig_in_no_pad))/fs_hz/3600\n\n [tdr_sig, _, _, _, tdr_isnr, tdr_esnr] = \\\n info.shannon_tdr(sig_in_real=sig_in_no_pad)\n\n rfft_sig1 = rfft(x=sig_in_no_pad)\n rfft_freq1 = rfftfreq(n=len(sig_in_no_pad), d=1/fs_hz)\n [fft_marginal1, fft_angle_rads1, _, _, fft_isnr1, fft_esnr1] = \\\n info.shannon_fft(fft_sig=rfft_sig1)\n\n rfft_sig2 = rfft(x=sig_in)\n rfft_freq2 = rfftfreq(n=len(sig_in), d=1/fs_hz)\n [fft_marginal2, fft_angle_rads2, _, _, fft_isnr2, fft_esnr2] = \\\n info.shannon_fft(fft_sig=rfft_sig2)\n\n # Use zero padding\n rfft_freq3, welch_power = welch(x=sig_in_no_pad, fs=fs_hz,\n nperseg=welch_points, noverlap=overlap_points,\n nfft=len(sig_in))\n [fft_marginal3, _, _, _, fft_isnr3, fft_esnr3] = \\\n info.shannon_fft(fft_sig=np.sqrt(welch_power))\n\n # set the font size\n fontsz = 'x-large'\n # Initialize subplots\n fig, ax = plt.subplots(3, 2)\n fig.set_size_inches(16, 8)\n\n # Axis numbering line\n ref_line = mlines.Line2D([], [])\n\n ax[2, 0].plot(time, tdr_sig)\n ax[2, 0].set_ylabel('Norm Signal', fontsize=fontsz)\n ax[2, 0].set_xlabel('Time, hours', fontsize=fontsz)\n ax[2, 0].set_xlim(time[0], time[time_idx-1])\n ax[2, 0].tick_params(labelsize='x-large')\n ax[2, 0].grid(True)\n if station_str is None:\n label_a = '(a)'\n else:\n label_a = '(a)' + station_str\n\n ax[2, 0].legend(handles=[ref_line], labels=[label_a], loc=2,\n frameon=False, handlelength=0, handletextpad=-0.5, 
fontsize='x-large')\n\n ax[1, 0].plot(time, tdr_isnr, '.')\n ax[1, 0].set_xticklabels([])\n ax[1, 0].set_ylim(-8, 8)\n ax[1, 0].set_xlim(time[0], time[time_idx-1])\n ax[1, 0].tick_params(labelsize='x-large')\n ax[1, 0].set_ylabel('Information SNR', fontsize=fontsz)\n ax[1, 0].grid(True)\n ax[1, 0].legend(handles=[ref_line], labels=['(b)'], loc=2,\n frameon=False, handlelength=0, handletextpad=-0.5, fontsize='x-large')\n\n ax[0, 0].plot(time, tdr_esnr, '.')\n ax[0, 0].set_xticklabels([])\n ax[0, 0].set_ylabel('Entropy SNR', fontsize=fontsz)\n ax[0, 0].set_xlim(time[0], time[time_idx-1])\n ax[0, 0].tick_params(labelsize='x-large')\n ax[0, 0].grid(True)\n ax[0, 0].legend(handles=[ref_line], labels=['(c)'], loc=2,\n frameon=False, handlelength=0, handletextpad=-0.5, fontsize='x-large')\n\n # Phase can be a bit boring\n # ax[2, 1].semilogx(rfft_freq1, fft_angle_rads1, '.', base=10, label='FFT')\n # ax[2, 1].semilogx(rfft_freq2, fft_angle_rads2, '.', base=10, label='FFT Padded')\n # ax[2, 1].set_xlim(fmin_hz, fmax)\n # # ax[2, 1].set_ylim(angle_min, angle_max+10)\n # ax[2, 1].set_ylim(-500, 50)\n # ax[2, 1].set_ylabel('Phase, rad', fontsize=fontsz)\n # ax[2, 1].set_xlabel('Frequency, Hz', fontsize=fontsz)\n # ax[2, 1].tick_params(labelsize='x-large')\n # ax[2, 1].grid(True)\n # panel_number = ax[2, 1].legend(handles=[ref_line], labels=['(d)'], loc=2,\n # frameon=False, handlelength=0, handletextpad=-0.5, fontsize='x-large')\n # ax[2, 1].legend(loc=1)\n # ax[2, 1].add_artist(panel_number)\n\n ax[2, 1].semilogx(rfft_freq1, fft_marginal1, '--', base=10, label='FFT')\n ax[2, 1].semilogx(rfft_freq2, fft_marginal2, '.', base=10, label='FFT Padded')\n ax[2, 1].semilogx(rfft_freq3, fft_marginal3, '.', base=10, label='Welch Padded')\n ax[2, 1].set_xlim(fmin_hz, fmax_hz)\n ax[2, 1].set_ylabel('Marginals', fontsize=fontsz)\n ax[2, 1].set_xlabel('Frequency, Hz', fontsize=fontsz)\n ax[2, 1].tick_params(labelsize='x-large')\n ax[2, 1].grid(True)\n panel_number = ax[2, 1].legend(handles=[ref_line], labels=['(d)'], loc=2,\n frameon=False, handlelength=0, handletextpad=-0.5, fontsize='x-large')\n ax[2, 1].legend(loc=1)\n ax[2, 1].add_artist(panel_number)\n\n ax[1, 1].semilogx(rfft_freq1, fft_isnr1, '--', base=10, label='FFT')\n ax[1, 1].semilogx(rfft_freq2, fft_isnr2, '.', base=10, label='FFT Padded')\n ax[1, 1].semilogx(rfft_freq3, fft_isnr3, '.', base=10, label='Welch Padded')\n ax[1, 1].set_xticklabels([])\n ax[1, 1].set_ylabel('Information SNR', fontsize=fontsz)\n ax[1, 1].set_ylim(-4, 12)\n ax[1, 1].set_xlim(fmin_hz, fmax_hz)\n ax[1, 1].tick_params(labelsize='x-large')\n ax[1, 1].grid(True)\n panel_number = ax[1, 1].legend(handles=[ref_line], labels=['(e)'], loc=2,\n frameon=False, handlelength=0, handletextpad=-0.5, fontsize='x-large')\n ax[1, 1].legend(loc=1)\n ax[1, 1].add_artist(panel_number)\n\n ax[0, 1].semilogx(rfft_freq1, fft_esnr1, '.', base=10, label='FFT')\n ax[0, 1].semilogx(rfft_freq2, fft_esnr2, '.', base=10, label='FFT Padded')\n ax[0, 1].semilogx(rfft_freq3, fft_esnr3, '.', base=10, label='Welch Padded')\n # NOTE: Below returns isnr because log2(plog2(p)) = log2(p) + log2(log2(p)) ~ log2p\n # ax[0, 1].semilogx(rfft_freq1, np.log2(fft_esnr1), '--', base=10, label='FFT')\n # ax[0, 1].semilogx(rfft_freq2, np.log2(fft_esnr2), '.', base=10, label='FFT Padded')\n # ax[0, 1].semilogx(rfft_freq3, np.log2(fft_esnr3), '.', base=10, label='Welch Padded')\n # ax[0, 1].set_ylim(-4, 12)\n ax[0, 1].set_xticklabels([])\n ax[0, 1].set_ylabel('Entropy SNR', fontsize=fontsz)\n # ax[0, 
1].set_title(title_label)\n ax[0, 1].set_xlim(fmin_hz, fmax_hz)\n ax[0, 1].tick_params(labelsize='x-large')\n ax[0, 1].grid(True)\n panel_number = ax[0, 1].legend(handles=[ref_line], labels=['(f)'], loc=2,\n frameon=False, handlelength=0, handletextpad=-0.5, fontsize='x-large')\n ax[0, 1].legend(loc=1)\n ax[0, 1].add_artist(panel_number)\n plt.tight_layout()\n\n if do_save:\n if save_path is None:\n print('Save path must be specified! Exiting program.')\n exit()\n fig.savefig(save_path)\n\n if do_show:\n plt.show()\n","repo_name":"ISLA-UH/quantum-inferno","sub_path":"quantum_inferno/plot_templates/plot_entropy_1d.py","file_name":"plot_entropy_1d.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27036967043","text":"import matplotlib.pyplot as plt\r\nfrom sklearn.datasets import load_breast_cancer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport numpy as np\r\nimport pdb\r\n# breast cancer 데이터를 가져온다.\r\ncancer = load_breast_cancer()\r\n\r\n# 표준화\r\nfeature_data = StandardScaler().fit_transform(cancer.data)\r\n\r\n# Train 데이터 세트와 Test 데이터 세트를 구성한다\r\ntrainX, testX, trainY, testY = train_test_split(feature_data, cancer.target, test_size = 0.2)\r\n\r\n# 학습 및 평가\r\n\"\"\"\r\nfeature 간 scale이 다름.\r\n분기를 나누는 DT의 경우에는 표준화가 필요없었지만, \r\nSVM, kNN의 경우 직선의 방정식을 찾기 위해 연산하므로 표준화 필요\r\n\"\"\"\r\nmodel = SVC(kernel='rbf', gamma=1.0, C=0.5)\r\nmodel.fit(trainX, trainY)\r\nprint('정확도 =', np.round(model.score(testX, testY), 3))\r\n\r\n# gamma와 C의 조합을 바꿔가면서 학습 데이터의 정확도가 최대인 조합을 찾는다\r\noptAcc = -999\r\noptG = 0\r\noptC = 0\r\nfor gamma in np.arange(0.1, 5.0, 0.1):\r\n for c in np.arange(0.1, 5.0, 0.1):\r\n model = SVC(kernel='rbf', gamma=gamma, C=c)\r\n model.fit(trainX, trainY)\r\n acc = model.score(testX, testY)\r\n \r\n if acc > optAcc:\r\n optG = gamma\r\n optC = c\r\n optAcc = acc\r\n pdb.set_trace()\r\n\r\nprint('optimal gamma = %.2f' % optG)\r\nprint('optimal C = %.2f' % optC)\r\nprint('optimal Accuracy = %.2f' % optAcc)\r\n\r\n# 최적 조건으로 학습한 결과를 확인한다.\r\nmodel = SVC(kernel='rbf', gamma=optG, C=optC)\r\nmodel.fit(trainX, trainY)\r\n\r\n# Test 세트의 Feature에 대한 class를 추정하고, 정확도를 계산한다\r\nprint('\\n')\r\nprint(\"* 학습용 데이터로 측정한 정확도 = %.2f\" % model.score(trainX, trainY))\r\nprint(\"* 시험용 데이터로 측정한 정확도 = %.2f\" % model.score(testX, testY))\r\n\r\n\"\"\"\r\n(1) 방법 1\r\nacc.append(model.score)\r\nG.append(gamma) \r\nC.append(c)\r\n\r\nidx = np.argmax(acc)\r\noptC = C[idx]\r\noptG = G[idx]\r\noptAcc = acc[idx]\r\n\r\n(2) 방법 2 - 따로 append를 하지 않아도 되니깐 메모리를 아낄 수 있다.\r\noptAcc = -999 # 충분히 작은 값을 넣어줌\r\noptG = 0\r\noptC = 0 \r\n\r\n(3) 방법 3 - GridSearchCV package\r\n\"\"\"","repo_name":"dobbytk/NLP_study","sub_path":"Multicampus/ML/day3/rbf_svm_cancer.py","file_name":"rbf_svm_cancer.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71922300213","text":"from functools import cache\nfrom collections import Counter\nfrom pprint import pprint\n\ndef run(p1, p2, dice):\n pos = [p1-1, p2-1]\n score = [0,0]\n s1 = s2 = 0\n i = 0\n while True:\n for p in range(2):\n for throws in range(3):\n pos[p] = (pos[p] + dice[i % len(dice)]) % 10\n i += 1\n score[p] += pos[p] + 1\n #print(score, 'after', i, 'throws')\n if score[p]>= 1000:\n print('player',p,'wins')\n print('score', score[(p+1)%2] * i)\n return\n\nrun(4, 8, range(1,101))\nrun(7, 5, range(1,101))\n\ndef replace(tup, field, value):\n l = list(tup)\n l[field] = value\n return tuple(l)\n\n\noutcomes = 

Counter()\nfor r1 in range(1,4):\n for r2 in range(1,4):\n for r3 in range(1,4):\n tot = r1+r2+r3\n outcomes[tot] += 1\n\npprint(outcomes)\ndef start2(start=(4,8), goal=1000, verbose=False):\n\n print()\n print('playing with start', start, 'goal', goal)\n @cache\n def run2(score_t, pos_t, round):\n p = round % 2\n wins = [0, 0]\n \n for tot, count in outcomes.items():\n pos_t2 = replace(pos_t, p, (pos_t[p] + tot)%10 )\n score_t2 = replace(score_t, p, score_t[p]+pos_t2[p]+1)\n if verbose:\n print('after turn', p, 'from scores', score_t, 'rolling',tot,'scores', score_t2, 'pos', pos_t2)\n if score_t2[p] >= goal:\n if verbose:\n print('player', p, 'wins!')\n wins[p] += count\n else:\n assert score_t2[0] < goal\n assert score_t2[1] < goal\n if verbose:\n print('recursing with', score_t2, pos_t2,'to run round', round+1)\n nwins = run2(score_t2, pos_t2, (round+1)%2)\n wins[0] += nwins[0] * count\n wins[1] += nwins[1] * count\n if verbose:\n print('from scores', score_t, 'at pos', pos_t, 'wins are', wins)\n return wins\n\n start_pos = ( (start[0]-1, start[1]-1))\n return run2( (0,0), start_pos, 0)\n\nassert start2(goal=3, verbose=True) == [990, 207]\nassert start2(start=(0,0), goal=3, verbose=True) == [27, 0]\nfor pos in [(4,8), (7,5)]:\n r = start2(start=pos, goal=21, verbose=False)\n print(pos, r)\n if pos == (4,8):\n assert r == [444356092776315,341960390180808]\n\n ","repo_name":"dickon/advent_of_code_2021","sub_path":"21/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18044950793","text":"import tkinter as tk\n\nclass Messagebox:\n def __init__(self,\n parent=None,titulo='ERROR',\n mensaje='ERROR',\n texto_aceptar='Aceptar',\n ancho='300',alto='100'):\n if parent is None:\n return parent\n self._v=tk.Toplevel(parent)\n self._v.title(titulo)\n self._v.geometry(f'{ancho}x{alto}')\n self._lbl_msg=tk.Label(self._v, text=mensaje)\n self._b_aceptar=tk.Button(self._v, text=texto_aceptar,\n command=lambda: self._v.destroy())\n self._lbl_msg.pack(side=tk.TOP,\n padx=(5,5),\n pady=(10,10))\n self._b_aceptar.pack(side=tk.TOP,\n anchor=tk.E,\n padx=(5,5),\n pady=(10,10))\n self._v.bind('', lambda event: self._v.destroy())\n self._v.focus_set()\n\n","repo_name":"toccicristian/clavespy","sub_path":"src/modelos/ventana_msg_modelo.py","file_name":"ventana_msg_modelo.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71836423094","text":"import pyglet\nimport random\nimport time\n\nfrom engine.monster import MonsterSprite\nfrom engine.grid import Grid\nfrom engine.log_spam import LogSpam\nfrom engine.spawner import Spawner\nfrom engine.throwable import Throwable\nfrom engine.tower import TowerSprite\n\n\nclass Game:\n\n CHARACTER_SPEED = 100 # Character movement in pixels per second\n FPS = 30 # frames per second (aka speed of the game)\n THROW_DISTANCE = 2\n\n def __init__(self, window, rows, cols, lives=3):\n pyglet.font.add_file('assets/PressStart2P-Regular.ttf')\n self.font = pyglet.font.load('Press Start 2P')\n\n # Set up batch and ordered groups\n self.bg_batch = pyglet.graphics.Batch()\n self.main_batch = pyglet.graphics.Batch()\n self.background = pyglet.graphics.OrderedGroup(0)\n self.midground = pyglet.graphics.OrderedGroup(1)\n self.terrainground = pyglet.graphics.OrderedGroup(2)\n self.monster_layer = pyglet.graphics.OrderedGroup(3)\n self.finish_ground = 
pyglet.graphics.OrderedGroup(4)\n self.foreground = pyglet.graphics.OrderedGroup(5)\n\n # Set up window and grid variables\n self.window = window\n self.window_block_width = cols\n self.cols = cols\n self.rows = rows\n self.grid = Grid(self.rows, self.cols, self.window.width)\n\n # Set up background\n bg_image = pyglet.image.load('assets/background/main_bg.png')\n self.bg_sprite = pyglet.sprite.Sprite(\n bg_image,\n batch=self.bg_batch,\n group=self.background\n )\n height_scale = self.window.height / self.bg_sprite.height\n width_scale = self.window.width / self.bg_sprite.width\n self.bg_sprite.scale = max(height_scale, width_scale)\n\n self.lives = lives\n self.draw_life_counter()\n\n # Set up character sprite info\n self.characters = dict()\n self.throwables = list()\n\n # State keeping variables\n self.lock = False\n self.game_over_timer = 3\n self.safety_counter = 10\n self.log_spam = LogSpam()\n\n # Set up environment objects\n self.obstacles = dict()\n self.terrain = dict()\n self.tower_spots = dict()\n self.towers = dict()\n self.spawners = list()\n\n self.finish = None\n self.num_goals = 0\n\n # Lab decision generator\n self.decision_func = None\n\n def on_draw(self):\n self.window.clear()\n self.bg_batch.draw()\n self.main_batch.draw()\n\n def start_game(self):\n self.window.push_handlers(\n on_draw=self.on_draw,\n )\n pyglet.clock.schedule_interval(self.update, 1.0 / self.FPS)\n\n def stop_game(self):\n print('Game shutting down')\n time.sleep(5)\n pyglet.clock.unschedule(self.update)\n self.window.remove_handlers()\n pyglet.app.exit()\n\n def add_decision_func(self, func):\n self.decision_func = func\n\n def set_character_commands(self, commands):\n self.commands = commands\n\n def update(self, dt):\n # Game is over and controls locked\n if self.lock:\n self.game_over_timer -= dt\n if self.game_over_timer <= 0:\n self.stop_game()\n\n game_over = True\n monsters = list()\n for spawner in self.spawners:\n spawner.update(dt)\n if len(spawner.monster_queue) > 0:\n game_over = False\n for monster in spawner.spawned:\n if monster.at_destination():\n spawner.spawned.remove(monster)\n monster.hide()\n self.lives = max(self.lives - 1, 0)\n self.update_life_label(self.lives)\n elif not monster.is_defeated:\n monsters.append(monster)\n\n if len(monsters) > 0:\n game_over = False\n\n if self.lives <= 0:\n if not self.lock:\n print(\"Game Over, you lost all your lives\")\n self.lock = True\n\n if game_over:\n if self.safety_counter > 0:\n self.safety_counter -= 1\n return\n if not self.lock:\n print(\"You Win! 
With {lives} lives remaining!\".format(\n lives=self.lives))\n self.lock = True\n\n for key in self.towers:\n # Just to throw a wrench into the lab\n random.shuffle(monsters)\n tower = self.towers[key]\n tower.update(dt, monsters)\n\n def _add_terrain(self, path, x, y, group):\n obstacle_image = pyglet.resource.image(path)\n obstacle_image.anchor_x = obstacle_image.width // 2\n obstacle_image.anchor_y = obstacle_image.height // 2\n obstacle_sprite = pyglet.sprite.Sprite(\n obstacle_image,\n batch=self.main_batch,\n x=x,\n y=y,\n group=group\n )\n height_scale = self.grid.cell_length / obstacle_sprite.height\n width_scale = self.grid.cell_length / obstacle_sprite.width\n obstacle_sprite.scale = min(height_scale, width_scale)\n return obstacle_sprite\n\n def add_terrain(self, path, coords):\n for coord_name in coords:\n x, y = self.grid.calculate_xy_from_name(coord_name)\n sprite = self._add_terrain(path, x, y, self.terrainground)\n self.terrain[coord_name] = sprite\n\n def add_finish(self, coord):\n x, y = self.grid.calculate_xy_from_name(coord)\n finish_image = pyglet.resource.image('assets/finish.png')\n finish_image.anchor_x = finish_image.width // 2\n finish_image.anchor_y = finish_image.height // 2\n self.finish = pyglet.sprite.Sprite(\n finish_image,\n batch=self.main_batch,\n x=x,\n y=y + self.grid.cell_length // 4, # Account for offset of road\n group=self.finish_ground\n )\n self.finish.scale = 0.33\n\n def add_tower_spots(self, coords):\n for coord_name in coords:\n x, y = self.grid.calculate_xy_from_name(coord_name)\n sprite = self._add_spot(x, y)\n self.tower_spots[coord_name] = sprite\n\n def _add_spot(self, x, y):\n spot_image = pyglet.resource.image('assets/background/dot.png')\n spot_image.anchor_x = spot_image.width // 2\n spot_image.anchor_y = spot_image.height\n spot_sprite = pyglet.sprite.Sprite(\n spot_image,\n batch=self.main_batch,\n x=x,\n y=y,\n group=self.terrainground\n )\n spot_sprite.scale = 0.5\n return spot_sprite\n\n def add_tower(self, coord):\n x, y = self.grid.calculate_xy_from_name(coord)\n tower_sprite = TowerSprite(x, y, self.main_batch)\n self.towers[coord] = tower_sprite\n\n def add_spawner(self, coord, dest_coord, config):\n self.spawners.append(\n Spawner(config, coord, dest_coord, self.grid,\n self.main_batch, self.monster_layer)\n )\n\n def add_selector_func(self, key, func):\n tower = self.towers[key]\n tower.set_selector_func(func)\n\n def draw_life_label(self):\n self.life_label = pyglet.text.Label(\n text=\"\",\n x=self.window.width * 0.80,\n y=self.window.height * 0.90,\n font_name='Press Start 2P',\n font_size=18,\n anchor_x='left',\n batch=self.main_batch,\n group=self.foreground\n )\n self.life_label.draw()\n\n def draw_life_counter(self):\n self.draw_life_label()\n self.update_life_label(self.lives)\n life_icon = pyglet.resource.image(\n 'assets/heart.png'\n )\n life_icon.anchor_x = life_icon.width * 1.5\n life_icon.anchor_y = life_icon.height * 0.1\n self.life_sprite = pyglet.sprite.Sprite(\n life_icon,\n x=self.window.width * 0.8,\n y=self.window.height * 0.9,\n batch=self.main_batch,\n group=self.foreground\n )\n\n def update_life_label(self, lives):\n self.life_label.text = \"Lives: {lives}\".format(lives=lives)\n","repo_name":"Kokonaut/fundamentals-lab-4","sub_path":"engine/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33863775472","text":"from base_gui import BaseGui\nfrom tkinter import 
filedialog, messagebox\nimport util.downlader as downloader\n\n\nclass DownloaderGuiCommonCore(BaseGui):\n Y_COORDINATE_START = 30\n Y_COORDINATE_INCREASE_STEPS = 70\n VIDEO_MEDIA_TYPE = 'video'\n MP3_MEDIA_TYPE = 'mp3'\n SET_DOWNLOAD_PATH_MESSAGE = 'Download directory: PLEASE CHOOSE!'\n\n _trim_start_seconds = None\n _trim_end_seconds = None\n\n def __init__(self, parent_frame, main_frame):\n super().__init__(parent_frame, main_frame)\n\n self._download_path_input_section = self.create_download_path_input_section()\n self._link_input_section = self.create_youtube_link_input_section()\n self._status_display = self.create_status_label()\n\n self._y_coordinate = self.draw_screen(\n self.Y_COORDINATE_START,\n self.Y_COORDINATE_INCREASE_STEPS,\n [self._status_display,\n self._download_path_input_section[0],\n self._download_path_input_section[1],\n self._link_input_section[0],\n self._link_input_section[1]]\n )\n\n def set_download_path(self):\n self._download_path_input_section[0].config(text = filedialog.askdirectory())\n\n def download_single_file(self, mediatype):\n download_path = self._download_path_input_section[0].cget('text')\n if self.SET_DOWNLOAD_PATH_MESSAGE == download_path:\n messagebox.showinfo(self.FAILURE_MESSAGE, 'Please set a download directory!')\n\n else:\n self.set_download_result_message_style('orange')\n\n self.change_status_message(\n downloader.download_single_file(\n self._link_input_section[1].get(),\n download_path,\n mediatype,\n self._trim_start_seconds,\n self._trim_end_seconds\n )\n ) \n\n def set_download_result_message_style(self, label_color):\n self._status_display.config(background = label_color, width = 200, height = 5, font = self.SECONDARY_LABEL_FONT)\n\n def change_status_message(self, status_message):\n if 'Download failed' in status_message:\n self.set_download_result_message_style('red')\n self._status_display.config(text = status_message)\n\n def download_video(self):\n self.download_single_file(self.VIDEO_MEDIA_TYPE)\n\n def download_mp3(self):\n self.download_single_file(self.MP3_MEDIA_TYPE)\n\n def create_status_label(self):\n return self.create_label('Download Status: READY')\n\n def create_youtube_link_input_section(self):\n return self.create_label('Enter download link: '), self.create_text_entry_widget(35)\n\n def create_download_path_input_section(self):\n return self.create_label(self.SET_DOWNLOAD_PATH_MESSAGE), self.create_button('Set directory', self.set_download_path, 'red')\n\n def create_download_video_button(self):\n return self.create_button('Download Video', self.download_video)\n\n def create_download_mp3_button(self):\n return self.create_button('Download MP3', self.download_mp3)\n","repo_name":"PietroSassone/youtube-downloader-desktop-app","sub_path":"youtube_downloader/graphical_interface/common_file_downloader_interface.py","file_name":"common_file_downloader_interface.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74931680692","text":"s1 = float(input(\"Primeiro Salário: \"))\ns2 = float(input(\"Segundo Salário: \"))\ns3 = float(input(\"Terceiro Salário: \"))\ns4 = float(input(\"Quarto Salário: \"))\n\nsoma1 = (s1 + s2 + s3 + s4)\n\nprint(20*\"*\", \"calculadora\", 20*\"*\")\nprint (\"Primeiro Salário: {:.2f} \\nSegundo Salário: {:.2f} \\nTerceiro Salário: {:.2f} \\nQuarto Salário: {:.2f}\" .format(s1, s2, s3, s4))\nprint (f\"Soma dos valores: 
{soma1:.2f}\")","repo_name":"llucassza/cursoApex","sub_path":"marcelo/modulo1/exercicio4.py","file_name":"exercicio4.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27540188025","text":"import tensorflow as tf\n\ndef grad_norm(grad):\n avoid_zero_div = tf.cast(1e-12, grad.dtype)\n shape = get_flat_shape(grad)\n std = tf.reshape(tf.contrib.keras.backend.std(tf.reshape(grad, [shape[0], -1]), axis=1),shape)\n std = tf.maximum(avoid_zero_div, std)\n return grad/std\ndef get_flat_shape(grad):\n t = tf.zeros(shape=tf.shape(grad)[0])\n for i in range(1,len(grad.get_shape().as_list())):\n t= tf.expand_dims(t,i)\n return tf.shape(t)\n\ndef get_gpu_status():\n with tf.Session() as sess:\n r = sess.run(tf.contrib.memory_stats.BytesInUse())\n return r\n\ndef gpu_session_config():\n # config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True, \n # intra_op_parallelism_threads=0, inter_op_parallelism_threads=0)\n config=tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return config\n","repo_name":"cloudseasail/IJCAI19_AliAAAC","sub_path":"IJCAI19/module/utils_tf.py","file_name":"utils_tf.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"11612569080","text":"\"\"\"\nTests of neo.rawio.mearecrawio\n\n\"\"\"\n\nimport unittest\n\nfrom neo.rawio.mearecrawio import MEArecRawIO\n\nfrom neo.test.rawiotest.common_rawio_test import BaseTestRawIO\n\n\ntry:\n import MEArec as mr\n HAVE_MEAREC = True\nexcept ImportError:\n HAVE_MEAREC = False\n\n\n@unittest.skipUnless(HAVE_MEAREC, \"requires MEArec package\")\nclass TestMEArecRawIO(BaseTestRawIO, unittest.TestCase, ):\n rawioclass = MEArecRawIO\n entities_to_download = [\n 'mearec'\n ]\n entities_to_test = [\n 'mearec/mearec_test_10s.h5'\n ]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Shiva-A-Lindi/Behavioral-Analysis","sub_path":"Laser_detection/python-neo-0.10.0/neo/test/rawiotest/test_mearecrawio.py","file_name":"test_mearecrawio.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"11736279492","text":"from copy import deepcopy\n\nimport pytest\n\nfrom constructive_geometries import ConstructiveGeometries, Geomatcher, resolved_row\n\n\ndef test_default_setup():\n cg = ConstructiveGeometries()\n\n geomatcher = Geomatcher()\n assert \"GLO\" in geomatcher\n assert \"RoW\" not in geomatcher\n assert geomatcher[\"RoW\"] == set()\n assert len(geomatcher[\"GLO\"]) > 400\n assert geomatcher[\"AS\"] == set(cg.data[\"AS\"])\n assert geomatcher[(\"ecoinvent\", \"Russia (Europe)\")] == set(\n cg.data[\"Russia (Europe)\"]\n )\n assert geomatcher[\"Japan\"] == set(cg.data[\"JP\"])\n\n with pytest.raises(KeyError):\n geomatcher[\"Nope\"]\n\n\ndef test_magic_methods():\n g = Geomatcher()\n assert len(g) > 400\n for _ in g:\n pass\n\n assert \"NO\" in g\n assert g[\"NO\"]\n\n del g[\"NO\"]\n del g[\"Russia (Europe)\"]\n\n g[\"foo\"] = {1, 2, 3}\n assert \"foo\" in g\n\n\ndef test_provide_topology():\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n g = Geomatcher(given.copy())\n assert g.topology == given\n\n\ndef test_split_faces():\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n expected = {\n \"A\": {1, 2, 5, 6},\n \"B\": {2, 5, 6, 4},\n }\n g = Geomatcher(given)\n g.split_face(3)\n assert 
g.topology == expected\n assert 3 not in g.faces\n assert 5 in g.faces\n\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n expected = {\n \"A\": {1, 2, 5, 6, 7},\n \"B\": {2, 5, 6, 7, 4},\n }\n g = Geomatcher(given)\n g.split_face(3, number=3)\n assert g.topology == expected\n\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n expected = {\n \"A\": {1, 2, 10, 11},\n \"B\": {2, 10, 11, 4},\n }\n g = Geomatcher(given)\n g.split_face(3, ids={10, 11})\n assert g.topology == expected\n\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n expected = {\n \"A\": {1, 2, 10, 11},\n \"B\": {2, 10, 11, 4},\n }\n g = Geomatcher(given)\n g.split_face(3, number=5, ids={10, 11})\n assert g.topology == expected\n\n\ndef test_empty_topology():\n g = Geomatcher({})\n assert g.topology == {}\n assert g.faces == set()\n assert \"NO\" not in g\n assert \"GLO\" not in g\n\n\ndef test_add_definitions():\n g = Geomatcher({})\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n g.add_definitions(given, \"foo\", False)\n assert (\"foo\", \"A\") in g.topology\n assert g.faces == {1, 2, 3, 4}\n assert \"NO\" not in g\n assert \"GLO\" not in g\n\n\ndef test_add_definitions_relative():\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n }\n extra = {\"C\": [\"A\", \"B\"]}\n g = Geomatcher(given)\n g.add_definitions(extra, \"foo\")\n assert g.topology[(\"foo\", \"C\")] == {1, 2, 3, 4}\n assert \"A\" in g.topology\n assert \"NO\" not in g\n assert \"GLO\" not in g\n\n\ndef test_actual_key():\n given = {\n \"A\": {1, 2, 3},\n (\"silly\", \"B\"): {2, 3, 4},\n }\n g = Geomatcher(given, \"silly\")\n assert g[\"A\"]\n assert g[\"B\"]\n assert g[\"B\"]\n assert g[(\"silly\", \"B\")]\n\n with pytest.raises(KeyError):\n g[(\"silly\", \"A\")]\n\n assert g._actual_key(\"RoW\") == \"RoW\"\n\n g = Geomatcher()\n assert g._actual_key(\"GLO\") == \"GLO\"\n\n\ndef test_actual_key_coco():\n given = {\n \"AT\": {1, 2},\n }\n g = Geomatcher(given, \"silly\")\n\n assert g[\"AT\"]\n assert g[\"Austria\"]\n\n g = Geomatcher(given, \"silly\", use_coco=False)\n assert g[\"AT\"]\n with pytest.raises(KeyError):\n g[\"Austria\"]\n\n\ndef test_finish_filter_include_self():\n g = Geomatcher({\"A\": {1, 2}})\n given = [(\"A\", 4), (\"B\", 6), (\"C\", 3)]\n assert g._finish_filter(deepcopy(given), \"A\", True, False, True) == [\"B\", \"A\", \"C\"]\n assert g._finish_filter(deepcopy(given), \"A\", True, False, False) == [\"C\", \"A\", \"B\"]\n\n\ndef test_finish_filter_not_include_self():\n g = Geomatcher({\"A\": {1, 2}})\n given = [(\"A\", 4), (\"B\", 6), (\"C\", 3)]\n assert g._finish_filter(deepcopy(given), \"A\", False, False, True) == [\"B\", \"C\"]\n assert g._finish_filter(deepcopy(given), \"A\", False, False, False) == [\"C\", \"B\"]\n\n\ndef test_finish_filter_exclusive():\n given = {\n \"A\": {1, 2, 3},\n \"B\": {2, 3, 4},\n \"C\": {3, 4, 5},\n \"D\": {10, 11},\n \"E\": {5, 6, 10},\n }\n g = Geomatcher(given)\n lst = [(\"A\", 5), (\"B\", 6), (\"C\", 7), (\"D\", 8), (\"E\", 9)]\n result = g._finish_filter(lst, \"A\", True, True, True)\n # Start with E (biggest), then B (next possible)\n assert result == [\"E\", \"B\"]\n result = g._finish_filter(lst, \"A\", True, True, False)\n # Start with A (smallest), then D (next possible)\n assert result == [\"A\", \"D\"]\n\n\ndef test_finish_filter_row_ordering():\n # Test non-exclusive ordering of RoW; RoW not key\n given = {\n \"A\": set(range(10)),\n \"B\": {2, 3, 4},\n \"C\": {3, 4, 5},\n \"D\": {10, 11},\n \"E\": {5, 6, 10},\n }\n g = Geomatcher(given)\n lst = [(\"B\", 6), 
(\"RoW\", 7), (\"D\", 8), (\"E\", 9)]\n result = g._finish_filter(lst, \"A\", False, False, True)\n assert result[-1] == \"RoW\"\n result = g._finish_filter(lst, \"A\", False, False, False)\n assert result[0] == \"RoW\"\n\n\ndef test_finish_filter_row_exclusive_row_key():\n g = Geomatcher()\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1)], \"RoW\", False, True, True\n )\n == []\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1)], \"RoW\", True, True, True\n )\n == []\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1), (\"RoW\", 0)],\n \"RoW\",\n False,\n True,\n True,\n )\n == []\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1), (\"RoW\", 0)],\n \"RoW\",\n True,\n True,\n True,\n )\n == [\"RoW\"]\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1)], \"RoW\", False, True, False\n )\n == []\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1)], \"RoW\", True, True, False\n )\n == []\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1), (\"RoW\", 0)],\n \"RoW\",\n False,\n True,\n False,\n )\n == []\n )\n assert (\n g._finish_filter(\n [(\"NO\", 4), (\"LT\", 3), (\"LV\", 2), (\"EE\", 1), (\"RoW\", 0)],\n \"RoW\",\n True,\n True,\n False,\n )\n == [\"RoW\"]\n )\n\n\ndef test_intersects():\n g = Geomatcher()\n expected = [\n \"GLO\",\n (\"ecoinvent\", \"UN-AMERICAS\"),\n (\"ecoinvent\", \"RLA\"),\n (\"ecoinvent\", \"UN-CARIBBEAN\"),\n ]\n assert g.intersects(\"CU\") == expected\n assert g.intersects(\"CU\", exclusive=True) == [\"GLO\"]\n expected = [\n (\"ecoinvent\", \"UN-CARIBBEAN\"),\n (\"ecoinvent\", \"RLA\"),\n (\"ecoinvent\", \"UN-AMERICAS\"),\n \"GLO\",\n ]\n assert g.intersects(\"CU\", biggest_first=False) == expected\n\n only = [\n (\"ecoinvent\", \"RLA\"),\n (\"ecoinvent\", \"UN-AMERICAS\"),\n ]\n expected = [\n (\"ecoinvent\", \"UN-AMERICAS\"),\n (\"ecoinvent\", \"RLA\"),\n ]\n assert g.intersects(\"CU\", only=only) == expected\n\n\ndef test_contained():\n g = Geomatcher()\n expected = [\n \"US\",\n (\"ecoinvent\", \"US-ASCC\"),\n (\"ecoinvent\", \"US-NPCC\"),\n (\"ecoinvent\", \"US-HICC\"),\n (\"ecoinvent\", \"US-WECC\"),\n (\"ecoinvent\", \"US-SERC\"),\n (\"ecoinvent\", \"US-RFC\"),\n (\"ecoinvent\", \"US-FRCC\"),\n (\"ecoinvent\", \"US-MRO\"),\n (\"ecoinvent\", \"US-SPP\"),\n ]\n assert g.contained(\"US\")[:5] == expected[:5]\n expected.pop(0)\n assert g.contained(\"US\", include_self=False)[:5] == expected[:5]\n assert g.contained(\"US\", include_self=False, exclusive=True)[:5] == expected[:5]\n assert g.contained(\"US\", biggest_first=False, include_self=False)[-1] == (\n \"ecoinvent\",\n \"US-ASCC\",\n )\n\n expected = [\n \"US\",\n (\"ecoinvent\", \"US-ASCC\"),\n (\"ecoinvent\", \"US-NPCC\"),\n (\"ecoinvent\", \"US-HICC\"),\n (\"ecoinvent\", \"US-WECC\"),\n ]\n only = [\n (\"ecoinvent\", \"US-WECC\"),\n \"US\",\n (\"ecoinvent\", \"US-NPCC\"),\n (\"ecoinvent\", \"US-ASCC\"),\n (\"ecoinvent\", \"US-HICC\"),\n ]\n assert g.contained(\"US\", only=only) == expected\n\n\ndef test_within():\n g = Geomatcher()\n expected = [\n \"GLO\",\n (\"ecoinvent\", \"UN-EUROPE\"),\n (\"ecoinvent\", \"FSU\"),\n (\"ecoinvent\", \"UN-EEUROPE\"),\n (\"ecoinvent\", \"IAI Area, Russia & RER w/o EU27 & EFTA\"),\n \"RU\",\n ]\n assert g.within(\"RU\") == expected\n expected.pop(-1)\n assert g.within(\"RU\", include_self=False) == expected\n assert 
g.within(\"RU\", exclusive=True) == [\"GLO\"]\n expected = [\n \"RU\",\n (\"ecoinvent\", \"IAI Area, Russia & RER w/o EU27 & EFTA\"),\n (\"ecoinvent\", \"UN-EEUROPE\"),\n (\"ecoinvent\", \"FSU\"),\n (\"ecoinvent\", \"UN-EUROPE\"),\n \"GLO\",\n ]\n assert g.within(\"RU\", biggest_first=False) == expected\n\n expected = [\n (\"ecoinvent\", \"UN-EUROPE\"),\n (\"ecoinvent\", \"FSU\"),\n (\"ecoinvent\", \"UN-EEUROPE\"),\n ]\n only = [\n (\"ecoinvent\", \"UN-EUROPE\"),\n (\"ecoinvent\", \"FSU\"),\n (\"ecoinvent\", \"UN-EEUROPE\"),\n ]\n assert g.within(\"RU\", only=only) == expected\n\n\ndef test_intersects_row():\n g = Geomatcher()\n assert g.intersects(\"RoW\") == []\n assert g.intersects(\"RoW\", include_self=True) == []\n assert g.intersects(\"RoW\", include_self=True, only=[\"NO\", \"LT\", \"RoW\"]) == [\"RoW\"]\n assert g.intersects((\"ecoinvent\", \"NORDEL\"), only=[\"NO\", \"RoW\"]) == [\"NO\"]\n assert g.intersects(\n \"NO\", only=[\"NO\", \"RoW\"], include_self=True, exclusive=True\n ) == [\"NO\"]\n assert g.intersects(\n \"NO\", only=[\"NO\", \"RoW\"], include_self=True, exclusive=False\n ) == [\"NO\"]\n assert (\n g.intersects(\n (\"ecoinvent\", \"BALTSO\"),\n include_self=False,\n exclusive=True,\n only=[\"RoW\", \"EE\", \"LT\", \"LV\"],\n )\n == [\"EE\", \"LT\", \"LV\"]\n )\n assert (\n g.intersects(\n (\"ecoinvent\", \"BALTSO\"),\n include_self=False,\n exclusive=True,\n only=[\"RoW\", \"LT\", \"LV\"],\n )\n == [\"LT\", \"LV\"]\n )\n\n\ndef test_contained_row():\n g = Geomatcher()\n assert g.contained(\"RoW\") == []\n assert g.contained(\"RoW\", include_self=True, only=[\"RoW\"]) == [\"RoW\"]\n assert \"RoW\" not in g.contained(\"GLO\", only=[\"NO\", \"RoW\"])\n assert \"RoW\" not in g.contained(\"GLO\")\n assert \"RoW\" not in g.contained((\"ecoinvent\", \"RAS\"), only=[\"NO\", \"LT\", \"RoW\"])\n\n\ndef test_within_row():\n g = Geomatcher()\n assert g.within(\"RoW\") == [\"GLO\"]\n del g[\"GLO\"]\n assert g.within(\"RoW\") == []\n\n\ndef test_row_contextmanager_add_remove_row():\n g_orig = Geomatcher()\n assert \"RoW\" not in g_orig\n with resolved_row([\"NO\", \"LT\", \"EE\"], g_orig) as g:\n assert \"RoW\" in g\n assert \"RoW\" in g_orig\n assert g is g_orig\n assert \"RoW\" not in g_orig\n\n\ndef test_row_contextmanager_datasets_or_locations():\n g_orig = Geomatcher()\n with resolved_row([\"NO\", \"LT\", \"EE\"], g_orig) as g:\n assert \"RoW\" in g.intersects((\"ecoinvent\", \"BALTSO\"))\n given = [\n {\"location\": \"NO\"},\n {\"location\": \"LT\"},\n {\"location\": \"EE\"},\n ]\n with resolved_row(given, g_orig) as g:\n assert \"RoW\" in g.intersects((\"ecoinvent\", \"BALTSO\"))\n\n\ndef test_row_contextmanager_intersects():\n g_orig = Geomatcher()\n with resolved_row([\"NO\", \"LT\", \"EE\"], g_orig) as g:\n assert \"RoW\" in g.intersects((\"ecoinvent\", \"BALTSO\"))\n\n\ndef test_row_contextmanager_contained():\n g_orig = Geomatcher()\n with resolved_row([\"NO\", \"LT\", \"EE\"], g_orig) as g:\n assert \"RoW\" not in g.contained((\"ecoinvent\", \"BALTSO\"))\n assert \"LT\" in g.contained((\"ecoinvent\", \"BALTSO\"))\n assert \"RoW\" in g.contained(\"GLO\")\n\n\ndef test_row_contextmanager_within():\n g_orig = Geomatcher()\n with resolved_row([\"NO\", \"LT\", \"EE\"], g_orig) as g:\n assert g.within(\"RoW\") == [\"GLO\", \"RoW\"]\n assert g.within(\"RoW\", biggest_first=False) == [\"RoW\", 
\"GLO\"]\n","repo_name":"ecoinvent/constructive_geometries","sub_path":"tests/geomatcher_tests.py","file_name":"geomatcher_tests.py","file_ext":"py","file_size_in_byte":12417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9820324129","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 1 08:06:44 2021\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\nfrom imodels import CLASSIFIERS\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport pandas as pd\r\nfrom IPython.display import clear_output\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ndef convert(state):\r\n # Converts ordinals to rows and columns\r\n row = state // 12\r\n col = state % 12\r\n\r\n return [row + 1, col + 1]\r\n\r\n#Use classifier to be XAI AI algorithm\r\nclassifier = CLASSIFIERS[1]\r\nmodel = classifier()\r\n\r\ndata = pd.read_csv(\"../Data/input/original_train.csv\")\r\ny_train = data.action.values\r\nx_train = data.drop(\"action\", axis=1)\r\nfeatures = x_train.columns\r\nx_train = x_train.values\r\n\r\n\r\n\r\n\r\ndata_test = pd.read_csv(\"../Data/input/original_test.csv\")\r\ny_test = data_test.action.values\r\nx_test = data_test.drop(\"action\", axis=1)\r\nfeatures = x_test.columns\r\nx_test = x_test.values\r\n\r\n\r\n\r\nmodel.fit(x_train,y_train, feature_names = features)\r\n#Strategies for model prediction\r\nprint(model)\r\nRulelist =model.print_list()\r\nprint(Rulelist)\r\ny_pre = model.predict(x_test)\r\nprint(y_pre)\r\nprint(y_test)\r\nprint(accuracy_score(y_test, y_pre))\r\n\r\nfrom frozen_environment import FrozenLakeEnv,MAPS\r\n#load environment\r\nmap = MAPS['12x12']\r\ntotal_count = 123\r\nsuccess = 0\r\nfail = 0\r\n# Traverse judge_repeat for all points on the map\r\nfor i in range(len(map)):\r\n for j in range(len(map[i])):\r\n #If the location on the map is 'FROZEN', select it as the starting point for training\r\n if map[i][j] == 'F':\r\n env = FrozenLakeEnv(map_name='12x12',start=[i,j])\r\n time.sleep(2)\r\n\r\n state = env.reset()\r\n done = False\r\n\r\n\r\n time.sleep(1.5)\r\n\r\n steps = 0\r\n\r\n while not done:\r\n clear_output(wait=True)\r\n env.render()\r\n time.sleep(0.3)\r\n #Use classifier to predict actions, results are floating point, rounded to the nearest whole number\r\n print(([convert(state)])[0])\r\n print(model.predict([convert(state)])[0])\r\n action = round(model.predict([convert(state)])[0])\r\n action = int(action)\r\n\r\n #Execute in the environment using the generated actions\r\n state, reward, done, _ = env.step(action)\r\n steps += 1\r\n\r\n clear_output(wait=True)\r\n env.render()\r\n\r\n if reward == 1:\r\n print(f'You have found your frisbee 🥏 in {steps} steps.')\r\n time.sleep(2)\r\n success += 1\r\n else:\r\n print('You fell through a hole 🕳, Game Over! Please try again!')\r\n time.sleep(2)\r\n fail += 1\r\n clear_output(wait=True)\r\n\r\nprint(total_count,success,fail)\r\nprint(f'success rate = {success/total_count}')\r\n","repo_name":"XuHeyr20678/DSPR_2029883","sub_path":"Frozen Lake/Greedy_Rule_List/Greedy_Rule.py","file_name":"Greedy_Rule.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73253742131","text":"from nturl2path import url2pathname\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name = 'home'),\n path('posts', views.posts, name = 'posts'),\n path('post//', views.post_detail, name = 'post_detail'),\n path('new_post', views.new_post, name = 'new_post'),\n path('users', views.users, name = 'users')\n \n]","repo_name":"Pwierenga/weblog_assignment","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9693402033","text":"from core import tool\nfrom core import modDatabase\n\nimport modArgona\n\nfrom msgMain import MsgMain\nfrom msgShort import MsgShort\n\"\"\"\nThis is the main part of Momocobot to handle income msg\nand reply required msg back to user.\n\nThe codes of this file mainly lineage from open_chat_message() in momocobot.py.\n\"\"\"\n\nclass Hande:\n \"\"\" handle received msg\n pass command to excute() for futher action (which is more complex)\n change cache info that store in memory (argon)\n and reply back to user\n \"\"\"\n def __init__(self,msg,argon):\n self.text = msg\n self.argo = argon\n\n self.resut = []\n self.cos = 0\n self.ekgu = 0\n\n msgMain = MsgMain(self.argo.lingua)\n primo = self.argo.database.get('mode',{})\n\n if max(primo.keys()) == 0 :\n self.resut = [msgMain.home({'keywo':self.text})]\n self.argo.keywo = self.text\n\n elif primo.get(max(primo.keys())) == 'creo':\n creodata = self.argo.database.get('creo',{})\n submo = creodata.get('submode','')\n if submo == '':\n self.resut = [msgMain.home({'keywo':self.text})]\n self.argo.keywo = self.text\n self.ekgu = 1\n elif submo == 'recom':\n self.argo.keywo = self.text\n self.ekgu = 1\n\n elif primo.get(max(primo.keys())) == 'saci':\n sacidata = self.argo.database.get('saci',{})\n submo = sacidata.get('submode','')\n if submo == '':\n self.resut = [msgMain.home({'keywo':self.text})]\n self.argo.keywo = self.text\n self.ekgu = 1\n elif submo == 'setio':\n self.argo.keywo = self.text\n self.ekgu = 1\n","repo_name":"SotongDJ/telepot-momoco","sub_path":"modHandle.py","file_name":"modHandle.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5678983136","text":"from typing import Callable, Tuple\n\nimport numpy as np\n\nimport flax.linen as nn\nimport jax\nimport jax.numpy as jnp\nfrom flax.core.frozen_dict import FrozenDict\nfrom flax.linen import dot_product_attention\nfrom jax import lax\nfrom jax.random import PRNGKey\n\nfrom ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel\nfrom ...utils import logging\nfrom .configuration_bert import BertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BertConfig\"\n_TOKENIZER_FOR_DOC = \"BertTokenizer\"\n\n\nBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.FlaxPreTrainedModel`. Check the superclass documentation for the\n generic methods the library implements for all its model (such as downloading, saving and converting weights from\n PyTorch models)\n\n This model is also a Flax Linen `flax.nn.Module\n `__ subclass. 
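A minimal usage sketch (an\n    illustration added here, not part of the original docstring; the checkpoint name is only an example and any\n    BERT checkpoint on the Hub should work the same way)::\n\n        from transformers import BertTokenizer, FlaxBertModel\n\n        tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n        model = FlaxBertModel.from_pretrained(\"bert-base-uncased\")\n        inputs = tokenizer(\"Hello, world!\", return_tensors=\"np\")\n        sequence_output, pooled_output = model(**inputs)\n\n    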
Use it as a regular Flax\n    Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n    Finally, this model supports inherent JAX features such as:\n\n    - `Just-In-Time (JIT) compilation <https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit>`__\n    - `Automatic Differentiation <https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation>`__\n    - `Vectorization <https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap>`__\n    - `Parallelization <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`__\n\n    Parameters:\n        config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n            weights.\n\"\"\"\n\nBERT_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (:obj:`numpy.ndarray` of shape :obj:`({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n            :meth:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n            details.\n\n            `What are input IDs? <../glossary.html#input-ids>`__\n        attention_mask (:obj:`numpy.ndarray` of shape :obj:`({0})`, `optional`):\n            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            `What are attention masks? <../glossary.html#attention-mask>`__\n        token_type_ids (:obj:`numpy.ndarray` of shape :obj:`({0})`, `optional`):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n            1]``:\n\n            - 0 corresponds to a `sentence A` token,\n            - 1 corresponds to a `sentence B` token.\n\n            `What are token type IDs? <../glossary.html#token-type-ids>`__\n        position_ids (:obj:`numpy.ndarray` of shape :obj:`({0})`, `optional`):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n            config.max_position_embeddings - 1]``.\n        return_dict (:obj:`bool`, `optional`):\n            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\nclass FlaxBertLayerNorm(nn.Module):\n    \"\"\"\n    Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data.\n    \"\"\"\n\n    hidden_size: int\n    epsilon: float = 1e-6\n    dtype: jnp.dtype = jnp.float32\n    use_bias: bool = True\n    scale: bool = True\n    scale_init: Callable[..., np.ndarray] = jax.nn.initializers.ones\n    bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros\n\n    def setup(self):\n        self.weight = self.param(\"weight\", self.scale_init, (self.hidden_size,))\n        # use the zeros initializer for the bias; initializing it with\n        # `scale_init` (ones), as the original code did, was a bug\n        self.bias = self.param(\"bias\", self.bias_init, (self.hidden_size,))\n\n    def __call__(self, x):\n        \"\"\"\n        Applies layer normalization on the input. It normalizes the activations of the layer for each given example in\n        a batch independently, rather than across a batch like Batch Normalization. i.e.
applies a transformation that\n maintains the mean activation within each example close to 0 and the activation standard deviation close to 1\n\n Args:\n x: the inputs\n\n Returns:\n Normalized inputs (the same shape as inputs).\n \"\"\"\n mean = jnp.mean(x, axis=-1, keepdims=True)\n mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True)\n var = mean2 - jax.lax.square(mean)\n mul = jax.lax.rsqrt(var + self.epsilon)\n\n if self.scale:\n mul = mul * jnp.asarray(self.weight)\n y = (x - mean) * mul\n\n if self.use_bias:\n y = y + jnp.asarray(self.bias)\n return y\n\n\nclass FlaxBertEmbedding(nn.Module):\n \"\"\"\n Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch\n use 'weight'\n \"\"\"\n\n vocab_size: int\n hidden_size: int\n initializer_range: float\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n init_fn: Callable[..., np.ndarray] = jax.nn.initializers.normal(stddev=self.initializer_range)\n self.embeddings = self.param(\"weight\", init_fn, (self.vocab_size, self.hidden_size))\n\n def __call__(self, input_ids):\n return jnp.take(self.embeddings, input_ids, axis=0)\n\n\nclass FlaxBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n self.word_embeddings = FlaxBertEmbedding(\n self.config.vocab_size,\n self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=self.dtype,\n )\n self.position_embeddings = FlaxBertEmbedding(\n self.config.max_position_embeddings,\n self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=self.dtype,\n )\n self.token_type_embeddings = FlaxBertEmbedding(\n self.config.type_vocab_size,\n self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=self.dtype,\n )\n self.LayerNorm = FlaxBertLayerNorm(hidden_size=self.config.hidden_size, dtype=self.dtype)\n self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)\n\n def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):\n # Embed\n inputs_embeds = self.word_embeddings(jnp.atleast_2d(input_ids.astype(\"i4\")))\n position_embeds = self.position_embeddings(jnp.atleast_2d(position_ids.astype(\"i4\")))\n token_type_embeddings = self.token_type_embeddings(jnp.atleast_2d(token_type_ids.astype(\"i4\")))\n\n # Sum all embeddings\n hidden_states = inputs_embeds + jnp.broadcast_to(position_embeds, inputs_embeds.shape) + token_type_embeddings\n\n # Layer Norm\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.dropout(hidden_states, deterministic=deterministic)\n return hidden_states\n\n\nclass FlaxBertSelfAttention(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n if self.config.hidden_size % self.config.num_attention_heads != 0:\n raise ValueError(\n \"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`: {self.config.num_attention_heads}\"\n )\n\n self.query = nn.Dense(\n self.config.hidden_size,\n dtype=self.dtype,\n kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n )\n self.key = nn.Dense(\n self.config.hidden_size,\n dtype=self.dtype,\n kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n )\n self.value = nn.Dense(\n 
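# (descriptive note added for clarity: query/key/value are all hidden_size -> hidden_size projections; __call__ below reshapes each to (batch, seq_len, num_heads, head_dim) before the attention op)\n            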
self.config.hidden_size,\n            dtype=self.dtype,\n            kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n        )\n\n    def __call__(self, hidden_states, attention_mask, deterministic=True):\n        head_dim = self.config.hidden_size // self.config.num_attention_heads\n\n        query_states = self.query(hidden_states).reshape(\n            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)\n        )\n        value_states = self.value(hidden_states).reshape(\n            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)\n        )\n        key_states = self.key(hidden_states).reshape(\n            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)\n        )\n\n        # Convert the boolean attention mask to an attention bias.\n        if attention_mask is not None:\n            # attention mask in the form of attention bias\n            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))\n            attention_bias = lax.select(\n                attention_mask > 0,\n                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),\n                jnp.full(attention_mask.shape, -1e10).astype(self.dtype),\n            )\n        else:\n            attention_bias = None\n\n        dropout_rng = None\n        # use the configured attention dropout probability here; the module has\n        # no `dropout_rate` attribute, so the original check raised an\n        # AttributeError whenever deterministic=False\n        if not deterministic and self.config.attention_probs_dropout_prob > 0.0:\n            dropout_rng = self.make_rng(\"dropout\")\n\n        attn_output = dot_product_attention(\n            query_states,\n            key_states,\n            value_states,\n            bias=attention_bias,\n            dropout_rng=dropout_rng,\n            dropout_rate=self.config.attention_probs_dropout_prob,\n            broadcast_dropout=True,\n            deterministic=deterministic,\n            dtype=self.dtype,\n            precision=None,\n        )\n\n        return attn_output.reshape(attn_output.shape[:2] + (-1,))\n\n\nclass FlaxBertSelfOutput(nn.Module):\n    config: BertConfig\n    dtype: jnp.dtype = jnp.float32  # the dtype of the computation\n\n    def setup(self):\n        self.dense = nn.Dense(\n            self.config.hidden_size,\n            kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n            dtype=self.dtype,\n        )\n        self.LayerNorm = FlaxBertLayerNorm(hidden_size=self.config.hidden_size)\n        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)\n\n    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):\n        hidden_states = self.dense(hidden_states)\n        hidden_states = self.dropout(hidden_states, deterministic=deterministic)\n        hidden_states = self.LayerNorm(hidden_states + input_tensor)\n        return hidden_states\n\n\nclass FlaxBertAttention(nn.Module):\n    config: BertConfig\n    dtype: jnp.dtype = jnp.float32\n\n    def setup(self):\n        self.self = FlaxBertSelfAttention(self.config, dtype=self.dtype)\n        self.output = FlaxBertSelfOutput(self.config, dtype=self.dtype)\n\n    def __call__(self, hidden_states, attention_mask, deterministic=True):\n        # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)\n        # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable\n        # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)\n        attn_output = self.self(hidden_states, attention_mask, deterministic=deterministic)\n        hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)\n        return hidden_states\n\n\nclass FlaxBertIntermediate(nn.Module):\n    config: BertConfig\n    dtype: jnp.dtype = jnp.float32  # the dtype of the computation\n\n    def setup(self):\n        self.dense = nn.Dense(\n            self.config.intermediate_size,\n            kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n            dtype=self.dtype,\n        )\n        self.activation = ACT2FN[self.config.hidden_act]\n\n    def __call__(self, hidden_states):\n        hidden_states = self.dense(hidden_states)\n        hidden_states = 
self.activation(hidden_states)\n return hidden_states\n\n\nclass FlaxBertOutput(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n self.dense = nn.Dense(\n self.config.hidden_size,\n kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n dtype=self.dtype,\n )\n self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)\n self.LayerNorm = FlaxBertLayerNorm(hidden_size=self.config.hidden_size, dtype=self.dtype)\n\n def __call__(self, hidden_states, attention_output, deterministic: bool = True):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, deterministic=deterministic)\n hidden_states = self.LayerNorm(hidden_states + attention_output)\n return hidden_states\n\n\nclass FlaxBertLayer(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n self.attention = FlaxBertAttention(self.config, dtype=self.dtype)\n self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype)\n self.output = FlaxBertOutput(self.config, dtype=self.dtype)\n\n def __call__(self, hidden_states, attention_mask, deterministic: bool = True):\n attention_output = self.attention(hidden_states, attention_mask, deterministic=deterministic)\n hidden_states = self.intermediate(attention_output)\n hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)\n return hidden_states\n\n\nclass FlaxBertLayerCollection(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n self.layers = [\n FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)\n ]\n\n def __call__(self, hidden_states, attention_mask, deterministic: bool = True):\n for layer in self.layers:\n hidden_states = layer(hidden_states, attention_mask, deterministic=deterministic)\n return hidden_states\n\n\nclass FlaxBertEncoder(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n self.layer = FlaxBertLayerCollection(self.config, dtype=self.dtype)\n\n def __call__(self, hidden_states, attention_mask, deterministic: bool = True):\n return self.layer(hidden_states, attention_mask, deterministic=deterministic)\n\n\nclass FlaxBertPooler(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n\n def setup(self):\n self.dense = nn.Dense(\n self.config.hidden_size,\n kernel_init=jax.nn.initializers.normal(self.config.initializer_range, self.dtype),\n dtype=self.dtype,\n )\n\n def __call__(self, hidden_states):\n cls_hidden_state = hidden_states[:, 0]\n cls_hidden_state = self.dense(cls_hidden_state)\n return nn.tanh(cls_hidden_state)\n\n\nclass FlaxBertPredictionHeadTransform(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)\n self.activation = ACT2FN[self.config.hidden_act]\n self.LayerNorm = FlaxBertLayerNorm(hidden_size=self.config.hidden_size, dtype=self.dtype)\n\n def __call__(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n return self.LayerNorm(hidden_states)\n\n\nclass FlaxBertLMPredictionHead(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.transform = FlaxBertPredictionHeadTransform(self.config, 
dtype=self.dtype)\n self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype)\n\n def __call__(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass FlaxBertOnlyMLMHead(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)\n\n def __call__(self, hidden_states):\n hidden_states = self.predictions(hidden_states)\n return hidden_states\n\n\nclass FlaxBertOnlyNSPHead(nn.Module):\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.seq_relationship = nn.Dense(2, dtype=self.dtype)\n\n def __call__(self, pooled_output):\n return self.seq_relationship(pooled_output)\n\n\nclass FlaxBertPreTrainingHeads(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype)\n self.seq_relationship = nn.Dense(2, dtype=self.dtype)\n\n def __call__(self, hidden_states, pooled_output):\n prediction_scores = self.predictions(hidden_states)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass FlaxBertPreTrainedModel(FlaxPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = BertConfig\n base_model_prefix = \"bert\"\n\n def _check_inputs(self, input_ids, attention_mask, token_type_ids, position_ids):\n if token_type_ids is None:\n token_type_ids = jnp.ones_like(input_ids)\n\n if position_ids is None:\n position_ids = jnp.arange(jnp.atleast_2d(input_ids).shape[-1])\n\n if attention_mask is None:\n attention_mask = jnp.ones_like(input_ids)\n\n return input_ids, attention_mask, token_type_ids, position_ids\n\n def init(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n jnp.zeros(input_shape, dtype=\"i4\"), None, None, None\n )\n\n params_rng, dropout_rng = jax.random.split(rng)\n rngs = {\"params\": params_rng, \"dropout\": dropout_rng}\n\n return self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids)[\"params\"]\n\n\n@add_start_docstrings(\n \"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertModel(FlaxBertPreTrainedModel):\n \"\"\"\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.\n \"\"\"\n\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertModule(config=config, dtype=dtype, **kwargs)\n\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32 # the dtype of the computation\n add_pooling_layer: bool = True\n\n def setup(self):\n self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype)\n self.encoder = FlaxBertEncoder(self.config, dtype=self.dtype)\n self.pooler = FlaxBertPooler(self.config, dtype=self.dtype)\n\n def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool = True):\n\n hidden_states = self.embeddings(\n input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic\n )\n hidden_states = self.encoder(hidden_states, attention_mask, deterministic=deterministic)\n\n if not self.add_pooling_layer:\n return hidden_states\n\n pooled = self.pooler(hidden_states)\n return hidden_states, pooled\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n \"\"\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertForPreTraining(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForPreTrainingModule(config, **kwargs)\n\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForPreTrainingModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)\n self.cls = 
FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype)\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n # Model\n hidden_states, pooled_output = self.bert(\n input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic\n )\n prediction_scores, seq_relationship_score = self.cls(hidden_states, pooled_output)\n\n return (prediction_scores, seq_relationship_score)\n\n\n@add_start_docstrings(\"\"\"Bert Model with a `language modeling` head on top. \"\"\", BERT_START_DOCSTRING)\nclass FlaxBertForMaskedLM(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForMaskedLMModule(config, **kwargs)\n\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForMaskedLMModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.bert = FlaxBertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype)\n self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype)\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n # Model\n hidden_states = self.bert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic)\n\n # Compute the prediction scores\n logits = self.cls(hidden_states)\n\n return (logits,)\n\n\n@add_start_docstrings(\n \"\"\"Bert Model with a `next sentence prediction (classification)` head on top. 
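The returned scores have shape\n    ``(batch_size, 2)``; index 0 means sentence B follows sentence A and index 1 means sentence B is a random\n    sentence. An illustrative sketch (added for clarity, not part of the original docstring; the checkpoint name\n    is only an example)::\n\n        from transformers import BertTokenizer, FlaxBertForNextSentencePrediction\n\n        tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n        model = FlaxBertForNextSentencePrediction.from_pretrained(\"bert-base-uncased\")\n        inputs = tokenizer(\"How old are you?\", \"The Eiffel Tower is in Paris.\", return_tensors=\"np\")\n        (seq_relationship_logits,) = model(**inputs)\n    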
\"\"\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForNextSentencePredictionModule(config, **kwargs)\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForNextSentencePredictionModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)\n self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype)\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n # Model\n _, pooled_output = self.bert(\n input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic\n )\n\n seq_relationship_scores = self.cls(pooled_output)\n return (seq_relationship_scores,)\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n \"\"\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertForSequenceClassification(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForSequenceClassificationModule(config, **kwargs)\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForSequenceClassificationModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)\n self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)\n self.classifier = nn.Dense(\n self.config.num_labels,\n dtype=self.dtype,\n )\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n # Model\n _, pooled_output = self.bert(\n input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic\n )\n\n pooled_output = self.dropout(pooled_output, deterministic=deterministic)\n logits = self.classifier(pooled_output)\n\n return (logits,)\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n \"\"\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertForMultipleChoice(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForMultipleChoiceModule(config, **kwargs)\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForMultipleChoiceModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.bert = FlaxBertModule(config=self.config, dtype=self.dtype)\n self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)\n self.classifier = nn.Dense(1, dtype=self.dtype)\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n num_choices = input_ids.shape[1]\n input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None\n attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None\n token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None\n position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None\n\n # Model\n _, pooled_output = self.bert(\n input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic\n )\n\n pooled_output = self.dropout(pooled_output, deterministic=deterministic)\n logits = self.classifier(pooled_output)\n\n reshaped_logits = logits.reshape(-1, num_choices)\n\n return (reshaped_logits,)\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertForTokenClassification(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForTokenClassificationModule(config, **kwargs)\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForTokenClassificationModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)\n self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)\n self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n # Model\n hidden_states = self.bert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic)\n\n hidden_states = self.dropout(hidden_states, deterministic=deterministic)\n logits = self.classifier(hidden_states)\n\n return (logits,)\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n BERT_START_DOCSTRING,\n)\nclass FlaxBertForQuestionAnswering(FlaxBertPreTrainedModel):\n def __init__(\n self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs\n ):\n module = FlaxBertForQuestionAnsweringModule(config, **kwargs)\n super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)\n\n @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def __call__(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n params: dict = None,\n dropout_rng: PRNGKey = None,\n train: bool = False,\n ):\n input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs(\n input_ids, attention_mask, token_type_ids, position_ids\n )\n\n # Handle any PRNG if needed\n rngs = {}\n if dropout_rng is not None:\n rngs[\"dropout\"] = dropout_rng\n\n return self.module.apply(\n {\"params\": params or self.params},\n jnp.array(input_ids, dtype=\"i4\"),\n jnp.array(attention_mask, dtype=\"i4\"),\n jnp.array(token_type_ids, dtype=\"i4\"),\n jnp.array(position_ids, dtype=\"i4\"),\n not train,\n rngs=rngs,\n )\n\n\nclass FlaxBertForQuestionAnsweringModule(nn.Module):\n config: BertConfig\n dtype: jnp.dtype = jnp.float32\n\n def 
setup(self):\n self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)\n self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)\n\n def __call__(\n self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True\n ):\n # Model\n hidden_states = self.bert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic)\n\n logits = self.qa_outputs(hidden_states)\n start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n return (start_logits, end_logits)\n","repo_name":"dropreg/R-Drop","sub_path":"huggingface_transformer_src/src/transformers/models/bert/modeling_flax_bert.py","file_name":"modeling_flax_bert.py","file_ext":"py","file_size_in_byte":38619,"program_lang":"python","lang":"en","doc_type":"code","stars":834,"dataset":"github-code","pt":"21"} +{"seq_id":"23673251248","text":"\"\"\"\nleast squares fitting on (ZTF) data with observations in two bands\nwe fit for the peak, using a Gaussian rise and an exponential decay\nwe measure the mean color pre-peak, and allow for a color-change post-peak\n\ngoal is photometric typing of alerts into:\n\n - \"SN\" or \"Ia\" : based on rise time \n - \"AGN\"\t\t: slow rise or fading\n - \"CV\"\t\t\t: fast\n - \"TDE\"\t\t: not the above and constant color or blue\n\n\"\"\"\n\nimport numpy as np\n\nlog10, log, exp, sqrt = np.log10, np.log, np.exp, np.sqrt\nfrom scipy.optimize import leastsq\nimport astropy.stats\n\n\n# flux/mag conversion without zeropoint (only used to force fit in mag space)\ndef f2m(flux):\n return -2.5 * log10(flux)\n\n\ndef m2f(mag):\n return 10 ** (-0.4 * (mag))\n\n\ndef line(p, x):\n return p[0] + p[1] * x\n\n\ndef gauss(x, sigma):\n return np.exp(-0.5 * x**2 / (sigma**2))\n\n\nminfs, maxfs = log(1e-9), log(1e9) # max fit slope for exponential/gaussian\n\n\ndef broken_gauss(p, x, dt_fix=False, fid=None):\n\n p = list(p)\n # check if we want to return only a fading/rising function\n if dt_fix is not False:\n x_peak = dt_fix\n\n if dt_fix > 0: # rising only\n a1 = m2f(p[0])\n p[1] = np.clip(p[1], minfs, maxfs)\n b1 = exp(p[1])\n return a1 * gauss(x - x_peak, b1)\n else: # fading only\n a2 = m2f(p[0])\n p[1] = np.clip(p[1], minfs, maxfs)\n b2 = exp(p[1])\n return a2 * np.exp(-(x - x_peak) / b2)\n\n # gaussian rise\n x_peak = np.clip(p[0], 0, max(x))\n a1 = m2f(p[1])\n p[2] = np.clip(p[2], minfs, maxfs)\n p[3] = np.clip(p[3], minfs, maxfs)\n b1 = exp(p[2])\n leftside = a1 * gauss(x - x_peak, b1)\n\n # exponential decay\n a2 = a1 * gauss(0, b1)\n b2 = exp(p[3])\n rightside = a2 * np.exp(-(x - x_peak) / b2)\n\n leftside[x > x_peak] = 0\n rightside[x <= x_peak] = 0\n\n return leftside + rightside\n\n\nminmaxcs = 0.1 # min/max colorslope\nmincolor, maxcolor = -1, +3\n\n\ndef color_func(x, x_peak, mean_color_in, slope_in):\n\n mean_color = np.clip(mean_color_in, mincolor, maxcolor)\n slope = np.clip(slope_in, -minmaxcs, minmaxcs)\n color = mean_color + (x - x_peak) * slope\n # ileft = x<=x_peak\n # if sum(ileft) and (x_peak>0):\n # \tcolor[ileft] = mean_color\n return color\n\n\ndef broken_gauss_twocolor(\n p, x, dt_fix=False, fid=None, flt1=\"g\", flt2=\"r\", x_peakc=None\n):\n\n p = list(p)\n both = broken_gauss(p, x, dt_fix=dt_fix)\n\n if dt_fix is not False:\n if not x_peakc:\n x_peakc = np.median(x) # new in 2019 (was 0)\n p[2] = np.clip(p[2], mincolor, maxcolor)\n # p[3] = np.clip(p[3], -minmaxcs, minmaxcs)\n if dt_fix > 
0:\n p_col = p[2], 0\n else:\n p_col = p[2], p[3]\n else:\n if not x_peakc:\n x_peakc = np.median(x) # new in 2019 np.clip(p[0], 0, max(x))\n p[4] = np.clip(p[4], mincolor, maxcolor)\n p[5] = np.clip(p[5], -minmaxcs, minmaxcs)\n p_col = p[4], p[5]\n\n iflt1 = fid == flt1\n iflt2 = fid == flt2\n\n color = color_func(x, x_peakc, p_col[0], p_col[1])\n\n # this can be tricky, when one band has very little data thing can go wrong\n both[iflt1] *= 10 ** (-0.4 * color[iflt1] / 2.0)\n both[iflt2] *= 10 ** (+0.4 * color[iflt2] / 2.0)\n\n return both\n\n\ndef res(p, x, y, yerr, model_func, dt_fix=False, fid=None, islim=None):\n\n model_flux = model_func(p, x, dt_fix=dt_fix, fid=fid)\n\n chi = (model_flux - y) / yerr\n chi = np.clip(chi, -1e5, 1e5)\n\n # do a rough prior on rise time, increase chi2 if larger/smaller than 100/0.01\n\n if (model_func is broken_gauss) or (model_func is broken_gauss_twocolor):\n\n p_col = 0, 0\n # check if we we have a fixed time of peak\n if dt_fix is False:\n logb1 = p[2]\n logb2 = p[3]\n if model_func is broken_gauss_twocolor:\n p_col = p[4], p[5]\n else:\n if dt_fix > 0: # rising only\n logb1 = p[1]\n logb2 = 0\n if model_func is broken_gauss_twocolor:\n p_col = p[2], 0\n else: # fading only\n logb1 = 0\n logb2 = p[1]\n if model_func is broken_gauss_twocolor:\n p_col = p[2], p[3]\n\n # log(100) = 4.60517\n rfmax = log(100)\n if abs(logb1) > rfmax:\n chi += (abs(logb1) - rfmax) / len(x)\n if abs(logb2) > 4.605:\n chi += (abs(logb2) - rfmax) / len(x)\n\n # weak push for constant color (helpful if limited data available?)\n chi += (p_col[1] - 0) / 0.05 / len(x)\n\n return chi\n\n\ndef flex_fit_wclip(\n dtime,\n flux,\n flux_err,\n fid,\n logger,\n isdetect=None,\n filters=[\"g\", \"r\"],\n ax=None,\n verbose=True,\n niter=0,\n):\n \"\"\"\n run flex fit a few time for clipping\n also pick the preferred band for reporting the results\n \"\"\"\n\n lc_data = {}\n plot_info = {}\n result, lsq, flux_diff = flex_fit(\n dtime,\n flux,\n flux_err,\n fid,\n logger,\n isdetect=isdetect,\n filters=[\"g\", \"r\"],\n ax=ax,\n verbose=True,\n )\n # print('dtime:', dtime)\n (\n lc_data[\"dtime\"],\n lc_data[\"flux\"],\n lc_data[\"flux_err\"],\n lc_data[\"fid\"],\n lc_data[\"isdetect\"],\n ) = (dtime, flux, flux_err, fid, isdetect)\n plot_info[\"lsq_out\"] = lsq\n plot_info[\"dt_fix\"] = result[\"dt_fix\"]\n outliers = (abs(flux_diff / flux_err) > 7) * isdetect\n logger.info(\"flex_fit_wclip: # of outliers {0}\".format(sum(outliers)))\n\n # ---\n # pick what band(s) to use store the final results\n final_result = {}\n flt_pick = None\n if (\"color\" in lsq) and ((\"r\" in lsq) or (\"g\" in lsq)):\n flt_pick = \"color\"\n elif \"r\" in lsq:\n flt_pick = \"r\"\n elif \"g\" in lsq:\n flt_pick = \"g\"\n if flt_pick is not None:\n for item in result[flt_pick]:\n final_result[item] = result[flt_pick][item]\n final_result[\"band\"] = flt_pick\n else:\n logger.info(\"flex_fit_wclip: no band yielded a succesful fit\")\n\n if (sum(outliers) == 0) or niter > 5:\n plot_info[\"dtime_out\"], plot_info[\"flux_out\"] = None, None\n return final_result, lc_data, plot_info\n else:\n # print (flux_diff/flux_err)\n # print (flux_diff)\n logger.info(\n \"flex_fit_wclip: running again after outlier rejection, attempts left={0}\".format(\n 5 - niter\n )\n )\n\n # allow one rejection\n maxout = max(abs(flux_diff[isdetect] / flux_err[isdetect]))\n outliers = abs(flux_diff / flux_err) == maxout\n logger.info(f\"outliers with flux: {flux[outliers]}\")\n\n plot_info[\"dtime_out\"] = dtime[outliers]\n 
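# (clarifying note: only the single worst point above the 7-sigma cut is rejected per pass; the fit is then re-run on the remaining data, with the recursion capped by the niter counter, so one bad epoch cannot drag the whole fit)\n        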
plot_info[\"flux_out\"] = flux[outliers]\n\n dtime = dtime[outliers == False]\n flux = flux[outliers == False]\n flux_err = flux_err[outliers == False]\n fid = fid[outliers == False]\n isdetect = isdetect[outliers == False]\n\n if len(flux[isdetect]) < 4:\n logger.info(\"flex_fit_wclip: not enough data left, keeping original result\")\n return final_result, lc_data, plot_info\n\n # loop continues\n return flex_fit_wclip(\n dtime,\n flux,\n flux_err,\n fid,\n logger,\n isdetect=isdetect,\n niter=niter + 1,\n filters=[\"g\", \"r\"],\n ax=ax,\n verbose=verbose,\n )\n\n\ndef flex_fit(\n dtime,\n flux,\n flux_err,\n fid,\n logger,\n isdetect=None,\n filters=[\"g\", \"r\"],\n ax=None,\n verbose=True,\n):\n \"\"\"\n function to fit a light curve with one or two bands with two Gaussian and get some color info\n returning scipy.optimize.leastq output dicts for each band and for the fit to both bands\n\n observations that are upper limits should have zero flux and be indicated with the isdetect=[] input (boolians array)\n \"\"\"\n\n from .flex_metrics import flux2mag, mag2flux\n\n # unless otherwise indicated, asume all data is a detection\n if isdetect is None:\n isdetect = np.repeat(True, len(dtime))\n\n # output dicts\n dt_fix = {}\n result = {k: {} for k in filters + [\"color\"]}\n lsq_out = {}\n\n # do initial guess of parameters\n flux_peak_guess = flux[isdetect][\n np.argsort(flux[isdetect])[-1]\n ] # pick brightest point as peak\n t_peak_guess = dtime[isdetect][np.argmax(flux[isdetect])]\n islim = isdetect == False\n\n flux_diff = np.zeros(len(flux))\n\n # irise = dtime t_peak_guess\n if sum(ipost * isdetect):\n sigma_fade_guess = np.clip(\n np.interp(\n flux_peak_guess / 2.0,\n flux[ipost * isdetect][::-1],\n dtime[ipost * isdetect][::-1],\n )\n - t_peak_guess,\n 1,\n 40,\n )\n else:\n sigma_fade_guess = 20\n\n if verbose:\n logger.info(\n \"guessed time of peak (wrt first obs) {0:0.2f}\".format(t_peak_guess)\n )\n logger.info(\n \"guessed peak (flux, mag) {0:0.2f} {1:0.2f}\".format(\n flux_peak_guess, flux2mag(flux_peak_guess)\n )\n )\n logger.info(\n \"guessed sigma (rise, fade) {0:0.2f} {1:0.2f}\".format(\n sigma_rise_guess, sigma_fade_guess\n )\n )\n\n # do single band fit for the two filter in fid\n for k in filters:\n\n iflt = fid == k\n if verbose:\n logger.info(str(k) + \" \" + str(sum(iflt * isdetect)) + \" \" + str(sum(iflt)))\n\n if (sum(iflt * isdetect) > 2) and (sum(iflt) > 3):\n\n # do the least-square fit\n p0 = [\n t_peak_guess,\n f2m(flux_peak_guess),\n log(sigma_rise_guess),\n log(sigma_fade_guess),\n ]\n dt_fix[k] = False\n lsq_out[k] = leastsq(\n res,\n p0,\n (dtime[iflt], flux[iflt], flux_err[iflt], broken_gauss, dt_fix[k]),\n full_output=True,\n )\n\n result[k][\"dtime_peak\"] = lsq_out[k][0][0]\n result[k][\"mag_peak\"] = flux2mag(m2f(lsq_out[k][0][1]))\n result[k][\"sigma_rise\"] = exp(lsq_out[k][0][2])\n result[k][\"sigma_fade\"] = exp(lsq_out[k][0][3])\n\n if lsq_out[k][1] is not None:\n result[k][\"e_dtime_peak\"] = sqrt(lsq_out[k][1][0, 0])\n result[k][\"e_mag_peak\"] = sqrt(lsq_out[k][1][1, 1])\n result[k][\"e_sigma_rise\"] = (\n sqrt(lsq_out[k][1][2, 2]) * result[k][\"sigma_rise\"]\n )\n result[k][\"e_sigma_fade\"] = (\n sqrt(lsq_out[k][1][3, 3]) * result[k][\"sigma_fade\"]\n )\n\n if verbose:\n ss = [\n \"{0:0.2f} ({1:0.2f}) \".format(result[k][x], result[k][\"e_\" + x])\n for x in (\"dtime_peak\", \"mag_peak\", \"sigma_rise\", \"sigma_fade\")\n ]\n logger.info(f\"rise/fade fit: {ss}\")\n\n # if we dont get a decent fit with errorbars,\n # check that the 
source is not only risng/fading\n else:\n\n if verbose:\n ss = [\n \"{0:0.2f} \".format(result[k][x])\n for x in (\"dtime_peak\", \"mag_peak\", \"sigma_rise\", \"sigma_fade\")\n ]\n logger.info(f\"rise/fade fit: {ss}\")\n\n if lsq_out[k][0][0] > (max(dtime[iflt]) - 3):\n dt_fix[k] = max(dtime[isdetect])\n p0 = [f2m(flux_peak_guess), log(sigma_rise_guess)]\n elif lsq_out[k][0][0] < (min(dtime[iflt]) + 3):\n dt_fix[k] = 0\n p0 = [f2m(flux_peak_guess), log(sigma_fade_guess)]\n\n if dt_fix[k] is not False:\n if verbose:\n logger.info(\n \"retrying fit without break, using x_peak fixed at {0:0.2f}\".format(\n dt_fix[k]\n )\n )\n lsq_out[k] = leastsq(\n res,\n p0,\n (dtime[iflt], flux[iflt], flux_err[iflt], broken_gauss, dt_fix[k]),\n full_output=True,\n )\n\n if verbose:\n logger.info(\n \"dt_fix fit: {0:0.2f} {1:0.2f}\".format(\n lsq_out[k][0][0], lsq_out[k][0][1]\n )\n )\n\n # overwrite results with single fit\n result[k][\"dtime_peak\"] = dt_fix[k]\n result[k][\"e_dtime_peak\"] = -1.0\n result[k][\"mag_peak\"] = flux2mag(m2f(lsq_out[k][0][0]))\n if lsq_out[k][1] is not None:\n result[k][\"e_mag_peak\"] = sqrt(lsq_out[k][1][0, 0])\n\n if dt_fix[k] > 0:\n result[k][\"sigma_rise\"] = exp(lsq_out[k][0][1])\n result[k][\"sigma_fade\"] = 0\n if lsq_out[k][1] is not None:\n result[k][\"e_sigma_rise\"] = (\n sqrt(lsq_out[k][1][1, 1]) * result[k][\"sigma_rise\"]\n )\n else:\n result[k][\"sigma_fade\"] = exp(lsq_out[k][0][1])\n result[k][\"sigma_rise\"] = 0\n if lsq_out[k][1] is not None:\n result[k][\"e_sigma_fade\"] = (\n sqrt(lsq_out[k][1][1, 1]) * result[k][\"sigma_fade\"]\n )\n\n result[k][\"n_rise\"] = int(\n sum(dtime[isdetect * iflt] < result[k][\"dtime_peak\"])\n )\n result[k][\"n_fade\"] = int(\n sum(dtime[isdetect * iflt] > result[k][\"dtime_peak\"])\n )\n\n flux_model = broken_gauss(lsq_out[k][0], dtime[iflt], dt_fix=dt_fix[k])\n flux_diff[iflt] = flux_model - flux[iflt]\n\n result[k][\"mad\"] = astropy.stats.median_absolute_deviation(\n flux_diff[isdetect]\n ) / np.median(flux[isdetect * iflt])\n result[k][\"rms\"] = np.std(flux_diff[isdetect]) / np.mean(\n flux[isdetect * iflt]\n )\n result[k][\"chi2\"] = sum(\n (flux_diff[iflt * isdetect] / flux_err[iflt * isdetect]) ** 2\n ) / (sum(isdetect) - len(p0))\n logger.info(\n \"chi2={0:0.2f}; RMS={1:0.2f}, MAD={2:0.2f}\".format(\n result[k][\"chi2\"], result[k][\"rms\"], result[k][\"mad\"]\n )\n )\n\n # ----------\n # fit both colors at the same time\n iflt_0 = fid == filters[0]\n iflt_1 = fid == filters[1]\n\n # at least 6 detection are needed given the number of free parameters\n # plus we require at least two detections in a given band,\n # otherwise the fit has too much freedom to go wild due to the color evolution term\n if (\n sum(isdetect) > 5\n and (sum(isdetect * iflt_0) > 1)\n and (sum(isdetect * iflt_1) > 1)\n ):\n\n if verbose:\n logger.info(f\"two color {sum(isdetect)} {len(dtime)}\")\n\n dt_fix[\"color\"] = False\n p0 = [\n t_peak_guess,\n f2m(flux_peak_guess),\n log(sigma_rise_guess),\n log(sigma_fade_guess),\n -0.1,\n 0.0,\n ]\n lsq_out[\"color\"] = leastsq(\n res,\n p0,\n (dtime, flux, flux_err, broken_gauss_twocolor, dt_fix[\"color\"], fid),\n full_output=True,\n ) # , maxfev=int(1e6), xtol=1e-99, ftol=1e-99)\n\n # store results\n result[\"color\"][\"dtime_peak\"] = lsq_out[\"color\"][0][0]\n result[\"color\"][\"mag_peak\"] = flux2mag(m2f(lsq_out[\"color\"][0][1]))\n result[\"color\"][\"sigma_rise\"] = exp(lsq_out[\"color\"][0][2])\n result[\"color\"][\"sigma_fade\"] = exp(lsq_out[\"color\"][0][3])\n 
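# (clarifying note: the two-color parameter vector is [t_peak, mag_peak, log(sigma_rise), log(sigma_fade), mean_color, color_slope], hence the [0][4] / [0][5] indexing here)\n        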
result[\"color\"][\"mean_color\"] = lsq_out[\"color\"][0][4]\n result[\"color\"][\"color_slope\"] = lsq_out[\"color\"][0][5]\n\n if lsq_out[\"color\"][1] is not None:\n result[\"color\"][\"e_dtime_peak\"] = sqrt(lsq_out[\"color\"][1][0, 0])\n result[\"color\"][\"e_mag_peak\"] = sqrt(lsq_out[\"color\"][1][1, 1])\n result[\"color\"][\"e_sigma_rise\"] = (\n sqrt(lsq_out[\"color\"][1][2, 2]) * result[\"color\"][\"sigma_rise\"]\n )\n result[\"color\"][\"e_sigma_fade\"] = (\n sqrt(lsq_out[\"color\"][1][3, 3]) * result[\"color\"][\"sigma_fade\"]\n )\n result[\"color\"][\"e_mean_color\"] = sqrt(lsq_out[\"color\"][1][4, 4])\n result[\"color\"][\"e_color_slope\"] = sqrt(lsq_out[\"color\"][1][5, 5])\n\n if verbose:\n ss = [\n \"{0:0.2f} ({1:0.2f}) \".format(\n result[\"color\"][x], result[\"color\"][\"e_\" + x]\n )\n for x in (\n \"dtime_peak\",\n \"mag_peak\",\n \"sigma_rise\",\n \"sigma_fade\",\n \"mean_color\",\n \"color_slope\",\n )\n ]\n logger.info(f\"rise/fade fit: {ss}\")\n\n # again, if we dont get a decent fit with errorbars,\n # check that the source is not only rising/fading\n else:\n\n if verbose:\n ss = [\n \"{0:0.2f}\".format(result[\"color\"][x])\n for x in (\n \"dtime_peak\",\n \"mag_peak\",\n \"sigma_rise\",\n \"sigma_fade\",\n \"mean_color\",\n \"color_slope\",\n )\n ]\n logger.info(f\"rise/fade fit: {ss}\")\n\n if lsq_out[\"color\"][0][0] > (max(dtime) - 3):\n dt_fix[\"color\"] = max(dtime[isdetect])\n p0 = [f2m(flux_peak_guess), log(sigma_rise_guess), 0, -0.02]\n elif lsq_out[\"color\"][0][0] < (min(dtime) + 3):\n dt_fix[\"color\"] = 0\n p0 = [f2m(flux_peak_guess), log(sigma_fade_guess), 0, -0.02]\n\n if dt_fix[\"color\"] is not False:\n\n if verbose:\n logger.info(\n \"retrying fit without break, using x_peak fixed at {0:0.2f} \".format(\n dt_fix[\"color\"]\n )\n )\n\n if dt_fix[\"color\"] > 0:\n if verbose:\n logger.info(\"no allowing color evolution\")\n p0 = [f2m(flux_peak_guess), log(sigma_rise_guess), -0.1]\n else:\n p0 = [f2m(flux_peak_guess), log(sigma_rise_guess), -0.1, 0]\n\n lsq_out[\"color\"] = leastsq(\n res,\n p0,\n (dtime, flux, flux_err, broken_gauss_twocolor, dt_fix[\"color\"], fid),\n full_output=True,\n )\n\n if verbose:\n logger.info(\n \"two-color dt_fix: m_peak={0:0.2f} rise={1:0.2f} | color={2:0.3f}\".format(\n flux2mag(m2f(lsq_out[\"color\"][0][0])),\n exp(lsq_out[\"color\"][0][1]),\n lsq_out[\"color\"][0][2],\n )\n )\n\n # overwrite results with single shape two color fit\n result[\"color\"][\"dtime_peak\"] = dt_fix[\"color\"]\n result[\"color\"][\"e_dtime_peak\"] = -1\n result[\"color\"][\"mag_peak\"] = flux2mag(m2f(lsq_out[\"color\"][0][0]))\n result[\"color\"][\"mean_color\"] = lsq_out[\"color\"][0][2]\n if dt_fix[\"color\"] > 0:\n result[\"color\"][\"color_slope\"] = 0\n else:\n result[\"color\"][\"color_slope\"] = lsq_out[\"color\"][0][3]\n\n if lsq_out[\"color\"][1] is not None:\n result[\"color\"][\"e_mag_peak\"] = sqrt(lsq_out[\"color\"][1][1, 1])\n result[\"color\"][\"e_mean_color\"] = sqrt(lsq_out[\"color\"][1][2, 2])\n if dt_fix[\"color\"] > 0:\n result[\"color\"][\"e_color_slope\"] = -999\n else:\n result[\"color\"][\"e_color_slope\"] = sqrt(lsq_out[\"color\"][1][3, 3])\n\n if dt_fix[\"color\"] > 0:\n result[\"color\"][\"sigma_rise\"] = exp(lsq_out[\"color\"][0][1])\n result[\"color\"][\"sigma_fade\"] = 0\n if lsq_out[\"color\"][1] is not None:\n result[\"color\"][\"e_sigma_rise\"] = (\n sqrt(lsq_out[\"color\"][1][1, 1]) * result[\"color\"][\"sigma_rise\"]\n )\n else:\n result[\"color\"][\"sigma_fade\"] = exp(lsq_out[\"color\"][0][1])\n 
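# (note: in this fixed-peak branch the light-curve model is single-sided, so the parameter for the unused side is pinned to zero rather than fitted)\n                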
result[\"color\"][\"sigma_rise\"] = 0\n if lsq_out[\"color\"][1] is not None:\n result[\"color\"][\"e_sigma_fade\"] = (\n sqrt(lsq_out[\"color\"][1][1, 1]) * result[\"color\"][\"sigma_fade\"]\n )\n\n flux_model = broken_gauss_twocolor(\n lsq_out[\"color\"][0], dtime, dt_fix=dt_fix[\"color\"], fid=fid\n )\n flux_diff = flux_model - flux\n\n result[\"color\"][\"mad\"] = astropy.stats.median_absolute_deviation(\n flux_diff[isdetect]\n ) / np.median(flux[isdetect])\n result[\"color\"][\"rms\"] = np.std(flux_diff[isdetect]) / np.mean(flux[isdetect])\n result[\"color\"][\"chi2\"] = sum(\n (flux_diff[isdetect] / flux_err[isdetect]) ** 2\n ) / (sum(isdetect) - len(p0))\n logger.info(\n \"chi2={0:0.2f}; RMS={1:0.2f}, MAD={2:0.2f}\".format(\n result[\"color\"][\"chi2\"], result[\"color\"][\"rms\"], result[\"color\"][\"mad\"]\n )\n )\n\n result[\"color\"][\"n_rise\"] = int(\n sum(dtime[isdetect] <= result[\"color\"][\"dtime_peak\"])\n )\n result[\"color\"][\"n_fade\"] = int(\n sum(dtime[isdetect] >= result[\"color\"][\"dtime_peak\"])\n )\n\n result[\"dt_fix\"] = dt_fix\n\n return result, lsq_out, flux_diff\n\n\n# make some higher-level statements about the light curve\ndef flex_class(result):\n\n photoclass = \"\"\n chat_bot = \"\"\n\n # check that we have any results (as selected by flex_fit_wclip)\n if not \"band\" in result:\n return \"\", \"no fit\"\n\n # print color information\n if (result[\"band\"] == \"color\") and (\n (result[\"n_rise\"] > 2) or (result[\"n_fade\"] > 2)\n ):\n\n if \"e_mean_color\" in result:\n if abs(result[\"e_mean_color\"]) < 0.1:\n chat_bot += \"={0:0.2f}+-{1:0.2f}, \".format(\n result[\"mean_color\"], np.clip(result[\"e_mean_color\"], 0, 999)\n )\n\n # print info if we are on the rise\n if result[\"n_rise\"] > 1:\n if result[\"sigma_rise\"] < 300:\n if result[\"n_fade\"] == 0:\n chat_bot += \"rising, \"\n elif result[\"n_fade\"] < 2:\n chat_bot += \"at/near peak, \"\n\n # check slope of rise time for SNe or CV like behaviour\n if not (\"e_sigma_rise\" in result):\n result[\"e_sigma_rise\"] = 999 # fix for printing\n\n if not (\"e_sigma_fade\" in result):\n result[\"e_sigma_fade\"] = 999\n\n # slow rise time is quite rare, can be AGN, TDE, SN II\n if result[\"sigma_rise\"] > 10:\n photoclass = \"TDE?\"\n\n chat_bot += \"slow rise time (sigma={0:0.1f}+-{1:0.1f} d), \".format(\n result[\"sigma_rise\"], np.clip(result[\"e_sigma_rise\"], 0, 999)\n )\n\n # if also blue this could be a TDE\n if (result[\"band\"] == \"color\") and (result[\"mean_color\"] < 0.0):\n photoclass = \"TDE\"\n elif result[\"sigma_rise\"] > 4:\n chat_bot += \"SN-like rise time (sigma={0:0.1f}+-{1:0.1f} d), \".format(\n result[\"sigma_rise\"], np.clip(result[\"e_sigma_rise\"], 0, 999)\n )\n # \tphotoclass = 'SN?'\n\n else:\n chat_bot += \"very slow rise, \"\n if (result[\"n_fade\"] > 2) and (result[\"sigma_fade\"] > 500):\n photoclass = \"AGN?\"\n\n if (\n (result[\"n_rise\"] > 1)\n and (result[\"sigma_rise\"] < 2)\n and (\"e_sigma_rise\" in result)\n ):\n chat_bot += \"warning, steep increase (sigma={0:0.1f}+-{1:0.1f}), \".format(\n result[\"sigma_rise\"], np.clip(result[\"e_sigma_rise\"], 0, 999)\n )\n photoclass = \"CV?\"\n\n if (result[\"n_rise\"] < 2) and (result[\"sigma_fade\"] < 500):\n chat_bot += \"missed peak(?), \"\n\n # print info for fading or constant sources\n if (result[\"n_fade\"] > 2) and (\"e_sigma_fade\" in result):\n\n if result[\"sigma_fade\"] < 1000:\n\n chat_bot += \"fading (tau={0:0.1f} +- {1:0.1f} d), \".format(\n result[\"sigma_fade\"], np.clip(result[\"e_sigma_fade\"], 0, 
999)\n )\n\n elif result[\"n_fade\"] > 3:\n\n chat_bot += \"very long fading time, \"\n\n # if fading time is actually measured properly, do classification\n if \"e_sigma_fade\" in result:\n if (result[\"sigma_fade\"] / result[\"e_sigma_fade\"]) > 2:\n if not photoclass:\n photoclass = \"AGN?\"\n elif \"color_slope\" in result:\n if abs(result[\"color_slope\"]) < 0.015:\n photoclass = \"not SN?\"\n\n # do checks on color and slope\n if (result[\"band\"] == \"color\") and (\"e_mean_color\" in result):\n\n # add label if blue\n if (result[\"mean_color\"] < -0.2) and (abs(result[\"e_mean_color\"]) < 0.1):\n chat_bot += \"blue! ({0:0.1f}+-{1:0.1f}), \".format(\n result[\"mean_color\"], np.clip(result[\"e_mean_color\"], 0, 999)\n )\n\n # type as TDE if constant color and blue (since May 2019 this now includes ~constant post-peak flux, plus cut on e_color_slope 0.015)\n if result[\"e_color_slope\"] < 0.015:\n\n if result[\"color_slope\"] < -0.015:\n chat_bot += \"getting more blue?! ({0:0.3f}+-{1:0.3f}), \".format(\n result[\"color_slope\"], np.clip(result[\"e_color_slope\"], 0, 999)\n )\n if (result[\"mean_color\"] < 0) and (result[\"sigma_fade\"] > 5):\n photoclass = \"TDE\"\n elif result[\"mean_color\"] < 0.1:\n photoclass = \"TDE?\"\n\n elif abs(result[\"color_slope\"]) < 0.015:\n if result[\"e_color_slope\"] > 0:\n chat_bot += \"near-constant color ({0:0.3f}+-{1:0.3f}) \".format(\n result[\"color_slope\"], np.clip(result[\"e_color_slope\"], 0, 999)\n )\n if (result[\"mean_color\"] < 0) and (result[\"sigma_fade\"] > 5):\n photoclass = \"TDE\"\n elif result[\"mean_color\"] < 0.1:\n photoclass = \"TDE?\"\n\n elif result[\"color_slope\"] < 0.020:\n chat_bot += \"cooling? ({0:0.3f}+-{1:0.3f}), \".format(\n result[\"color_slope\"], np.clip(result[\"e_color_slope\"], 0, 999)\n )\n if (result[\"mean_color\"] < -0.1) and (result[\"sigma_fade\"] > 5):\n photoclass = \"TDE\"\n elif result[\"mean_color\"] < 0.0:\n photoclass = \"TDE?\"\n\n elif (result[\"color_slope\"] >= 0.020) and (result[\"sigma_fade\"] < 100):\n chat_bot += \"cooling ({0:0.3f}+-{1:0.3f}), \".format(\n result[\"color_slope\"], np.clip(result[\"e_color_slope\"], 0, 999)\n )\n if result[\"color_slope\"] != minmaxcs:\n photoclass = \"SN\"\n\n # check fast fading\n if (result[\"n_fade\"] > 2) and (result[\"sigma_fade\"] < 5):\n chat_bot += \"warning fast fading, \"\n photoclass = \"fast/weird\"\n\n if not photoclass:\n photoclass = \"unknown\"\n\n if result[\"chi2\"] > 10:\n chat_bot += \"warning, poor fit (chi2/dof={0:0.1f})\".format(result[\"chi2\"])\n # photoclass +='_badfit'\n\n return chat_bot.strip(\", \"), photoclass\n","repo_name":"AmpelAstro/Ampel-nuclear","sub_path":"ampel/nuclear/flexfit/flexfit.py","file_name":"flexfit.py","file_ext":"py","file_size_in_byte":28332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27622190162","text":"# This question is essentially asking for the sum of totients <= 1000000. 
To\n# do this, I use a totient sieve\nMAX_DENOMINATOR = 1000000\n\ndef count_fractions():\n    # See http://math.stackexchange.com/questions/316376/how-to-calculate-these-totient-summation-sums-efficiently\n\n    # phi is the list of totients: after the sieve, phi[i] holds Euler's totient of i\n    phi = list(range(MAX_DENOMINATOR+1))\n\n    for i in range(2, MAX_DENOMINATOR+1):\n        if phi[i] == i:  # i is prime, so apply the factor (1 - 1/i) to every multiple of i\n            for j in range(i, MAX_DENOMINATOR+1, i):\n                phi[j] -= phi[j] // i\n\n    # Don't count the tot(1) == 1\n    return sum(phi) - 1\n\ndef main():\n    answer = count_fractions()\n    print(answer)\n\nif __name__ == '__main__':\n    main()","repo_name":"pf981/project-euler","sub_path":"072_counting_fractions2.py","file_name":"072_counting_fractions2.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
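A quick sanity check of the sieve in the record above, added for illustration: it verifies the sieved totients against the brute-force definition for small n. The helper names (phi_naive, phi_sieve) are mine, not part of the original record; only math.gcd from the standard library is assumed.

from math import gcd

def phi_naive(n):
    # Euler's totient by definition: count of k in [1, n] with gcd(k, n) == 1
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

def phi_sieve(limit):
    # the same sieve as in the record above, just parameterized
    phi = list(range(limit + 1))
    for i in range(2, limit + 1):
        if phi[i] == i:  # i is prime
            for j in range(i, limit + 1, i):
                phi[j] -= phi[j] // i
    return phi

sieve = phi_sieve(100)
assert all(sieve[n] == phi_naive(n) for n in range(1, 101))
print(sum(sieve[2:]))  # reduced proper fractions with denominator <= 100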
+{"seq_id":"12804604920","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StringType, StructField, IntegerType, DoubleType\nfrom pyspark.sql.functions import *\n\nif __name__ == '__main__':\n    spark = SparkSession.builder.master(\"local[*]\").appName(\"Filter data\").getOrCreate()\n\n    schema_data = StructType([\n        StructField(\"id\", IntegerType(), False),  # nullable false\n        StructField(\"name\", StringType()),\n        StructField(\"gender\", StringType()),\n        StructField(\"city\", StringType()),\n        StructField(\"salary\", DoubleType())\n    ])\n\n    df = spark.read.load(r\"C:\\Users\\Tejas\\PycharmProjects\\pythonProject\\input_data\\employee.csv\",\n                         format=\"csv\", schema=schema_data)\n\n    #df.filter(df.gender == \"male\").select(\"id\", \"name\", \"gender\").show()\n\n    # df.filter(df.gender.startswith(\"m\")).show()\n    df1 = df.withColumn(\"salary\", col(\"salary\").cast(\"Integer\"))\n    df1.printSchema()\n    # `max` here is pyspark.sql.functions.max (an aggregate), so it belongs in agg(),\n    # not in filter(), which expects a boolean Column\n    df1.agg(max(df1.salary).alias(\"max_salary\")).show()","repo_name":"tejas078/pythonProject","sub_path":"venv/filteroutdata.py","file_name":"filteroutdata.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
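To make the fixed last line of the record above concrete, here is a hedged, self-contained sketch of the two Spark idioms it separates: filter expects a boolean Column, while aggregates such as max belong in agg. The tiny in-memory DataFrame is illustrative only and is not taken from the record.

from pyspark.sql import SparkSession
from pyspark.sql.functions import max as sql_max

spark = SparkSession.builder.master("local[*]").appName("filter-vs-agg").getOrCreate()
df = spark.createDataFrame([(1, "a", 100.0), (2, "b", 250.0)], ["id", "name", "salary"])

df.filter(df.salary > 150).show()                     # row predicate: boolean Column
df.agg(sql_max("salary").alias("max_salary")).show()  # aggregate over the whole frame
spark.stop()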
+{"seq_id":"36660022483","text":"# WAP that tokenizes a string and displays the new string with each token reversed.\nx = 'he is a good boy.'\nb = ''\nz = ''\nfor i in x:\n    if i != ' ':\n        b = i+b\n    elif i == ' ':\n        z = z+' '+b\n        b = ''\nz = z+' '+b  # flush the last token, which is not followed by a space\nprint(z)\n","repo_name":"Nischal1101/python-","sub_path":"assignments/Day4_HW/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18708449966","text":"\nimport os\n\nWIT_AI_CLIENT_ACCESS_TOKEN = 'JV6WLCBDLTXIJUAOQ2MOOFBR7DUZQON7' # Put your Client Access Token Here\nthresholdConfidence = 0.79 # WIT intent confidence to proceed with execution\nskipLogInteraction = True\ndebugMode = True\ntemplatesLocation = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/')\ncustomEventIDWitResponse = 'WitResponseEvent'\ncustomEventIDPopupMessage = 'PopupMessageEvent'\ncustomEventIDPaletteMessage = 'PaletteMessageEvent'\n","repo_name":"fischjer4/Kora-Voice-Assistant","sub_path":"Kora/main/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"40444744887","text":"from tkinter import *\nfrom tkinter import ttk\nfrom libs.Config.Settings import Settings\nfrom libs.Classes.Db import DataBase\n\n\nclass MainWindow:\n    \"\"\"Main window of the program\"\"\"\n\n    def __init__(self, active_user):\n        \"\"\"Initialize the main window\"\"\"\n        self.user = active_user\n\n        # Initialize the main window\n        self.main_win = Tk()\n        self.main_win.title(Settings().TITLE + ' Version: ' + Settings().VERSION)\n        self.main_win.iconbitmap(Settings().FAVICON)\n        self.main_win.resizable(False, False)\n\n        # Center the window on the screen\n        width = self.main_win.winfo_screenwidth()\n        height = self.main_win.winfo_screenheight()\n        self.main_win.geometry(f'700x500+{width // 2 - 350}+{height // 2 - 250}')\n\n        # Menu\n        self.menubar = Menu(self.main_win)\n        self.main_win.config(menu=self.menubar)\n        # File menu\n        file_menu = Menu(self.menubar, tearoff=0)\n        file_menu.add_command(label=\"Change user\", command=self.change_user)\n        file_menu.add_command(label=\"Exit\", command=self.on_exit)\n        self.menubar.add_cascade(label=\"File\", menu=file_menu)\n\n        # Tabs\n        rows = 0\n        while rows < 50:\n            self.main_win.rowconfigure(rows, weight=1)\n            self.main_win.columnconfigure(rows, weight=1)\n            rows += 1\n        tabs = ttk.Notebook(self.main_win)\n        tabs.grid(row=1, column=0, columnspan=50, rowspan=49, sticky='NESW')\n\n        # \"To-do list\" tab\n        todo_win = ttk.Frame(tabs)\n        tabs.add(todo_win, text='To-do list')\n        todo_win_rows = 0\n        while todo_win_rows < 50:\n            todo_win.rowconfigure(todo_win_rows, weight=1)\n            todo_win.columnconfigure(todo_win_rows, weight=1)\n            todo_win_rows += 1\n        tabs_todo = ttk.Notebook(todo_win)\n        tabs_todo.grid(row=1, column=0, columnspan=50, rowspan=49, sticky='NESW')\n\n        # \"Current\" tasks tab\n        current_task = ttk.Frame(tabs_todo)\n        tabs_todo.add(current_task, text='Current')\n        btn_add_task = ttk.Button(current_task, text='Add task', command=self.add_task)\n        btn_add_task.grid(row=0, column=0, pady=20, padx=10, sticky=N)\n        tasks_listbox = Listbox(current_task)\n        tasks = self.get_tasks_by_user_id(10)\n        if not tasks:\n            Label(current_task, text='No tasks yet...').grid(row=1, column=0, columnspan=2)\n        else:\n            task_i = 1\n            for task in tasks:\n                text = str(task_i) + '. ' + task['text'][:30]\n                if len(task['text']) > 30:\n                    text += '...'\n                if task['priority'] == 1:\n                    text = '* ' + text\n                tasks_listbox.insert(END, text)\n                task_i += 1\n        tasks_listbox.grid(row=1, column=0, sticky=N+S+W+E)\n        # the virtual event name was garbled in extraction; <<ListboxSelect>> is the standard tkinter event\n        tasks_listbox.bind(\"<<ListboxSelect>>\", self.edit_task)\n        textarea_task = Text(current_task)\n        textarea_task.grid(row=0, rowspan=2, column=1, columnspan=3, sticky=N+S+W+E)\n        btn_save = ttk.Button(current_task, text='Save')\n        btn_delete = ttk.Button(current_task, text='Delete')\n        btn_complete = ttk.Button(current_task, text='Done')\n        btn_save.grid(row=2, column=1)\n        btn_delete.grid(row=2, column=2)\n        btn_complete.grid(row=2, column=3)\n\n        # \"Completed\" tasks tab\n        done_task = ttk.Frame(tabs_todo)\n        tabs_todo.add(done_task, text='Completed')\n\n        # \"Finances\" tab\n        accountant_win = ttk.Frame(tabs)\n        tabs.add(accountant_win, text='Finances')\n\n        # Start the event loop\n        self.main_win.mainloop()\n\n    def edit_task(self, event=None):  # bound to <<ListboxSelect>>, so it must accept the event argument\n        pass\n\n    def add_task(self):\n        pass\n\n    def get_tasks_by_user_id(self, count):\n        query = 'SELECT * FROM `tasks` WHERE `user_id` = ' + str(\n            self.user['id']) + ' AND `status` = \"in_progress\" ORDER BY `priority` DESC'\n        return DataBase().select(query, 'all')\n\n    def on_exit(self):\n        self.main_win.destroy()\n\n    def change_user(self):\n        from libs.windows.EnterForm import EnterForm\n        del self.user\n        self.main_win.destroy()\n        EnterForm()\n","repo_name":"dyinglev/test","sub_path":"libs/windows/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
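Because the Listbox event name in the record above had to be reconstructed, here is a minimal, self-contained sketch of the <<ListboxSelect>> idiom; all widget names are hypothetical and unrelated to the application above.

import tkinter as tk

def on_select(event):
    widget = event.widget
    if widget.curselection():                       # tuple of selected indices
        print(widget.get(widget.curselection()[0]))

root = tk.Tk()
listbox = tk.Listbox(root)
for item in ("first task", "second task"):
    listbox.insert(tk.END, item)
listbox.bind("<<ListboxSelect>>", on_select)        # fires whenever the selection changes
listbox.pack()
root.mainloop()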
+{"seq_id":"31850708671","text":"from rooms.chat_room import ChatRoom\nfrom rooms.room_manager import RoomManager\nimport os\n\nfrom flask import Flask\nfrom flask_socketio import SocketIO, emit, send, join_room, rooms\n\nimport sys\nsys.path.append('./rooms')\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\nroom_manager = RoomManager()\n\n\n@app.route(\"/\")\ndef index():\n    return \"Project 2: TODO\"\n\n\n@socketio.on('message')\ndef handle_message(data):\n    room_manager.get_room(data['room']).add_message(\n        data['user'], data['body'], data['timestamp'])\n    send({\"body\": data['body'], \"author\": data['user'],\n          \"timestamp\": data['timestamp']}, room=data['room'], broadcast=False)\n\n\n@socketio.on('join')\ndef _join_room(room_name):\n    join_room(room_name)\n    if room_name in room_manager.get_room_names():\n        room = room_manager.get_room(room_name)\n    else:\n        room = ChatRoom(room_name)\n        room_manager.add_room(room)\n    emit('room-list', room_manager.get_room_names())\n    emit('messages', room_manager.get_room(room_name).get_messages())\n\n\nif __name__ == '__main__':\n    socketio.run(app)\n","repo_name":"JuanLucha/simplet-chat","sub_path":"backend/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70749897974","text":"from utils import create_a_folder_with_readme, get_host\nimport os\nimport click\nimport logging\n\nfrom utils import get_username\nfrom utils import get_remote_credentials_for_current_user\n\nfrom provider.factory import get_provider\n\nfrom constants import PROVIDERS\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group(\"create\")\ndef create():\n    \"\"\"Create a new repository.\"\"\"\n    pass\n\n\n@click.command(\"repo\")\n@click.option(\"--name\", \"-n\", help=\"Name of the repository\", required=True)\n@click.option(\"--path\", \"-p\", help=\"Path of the repository\", default=\"\")\ndef repo(name, path):\n    \"\"\"Create a new repository.\"\"\"\n\n    if not path:\n        new_path = os.getcwd()\n    else:\n        if os.path.exists(path):\n            new_path = path\n        else:\n            click.echo(f\"\\\"{path}\\\" : Path does not exist!\")\n            return\n\n    logger.log(logging.INFO, \"Creating a new repository...\")\n    click.echo(\"Creating a new repository...\")\n    click.echo(\"Name: {}\".format(name))\n    click.echo(\"Path: {}\".format(new_path))\n\n    # Create a folder with a readme file\n    folder_path = create_a_folder_with_readme(name, new_path)\n\n    # Create a repo in remote host.\n    host = get_host()\n\n    def _resolve_host_name(host):\n        for key, value in PROVIDERS.items():\n            if host in value:\n                return key\n        return None\n\n    # Returns a provider object based on the host name\n    provider = get_provider(_resolve_host_name(host))\n\n    current_user = get_username()\n\n    creds = get_remote_credentials_for_current_user(current_user)\n    token = creds[\"password\"]\n    remote_repo = provider(username=current_user, token=token)\n    remote_repo.data = {\n        \"name\": f\"{name}\",\n        \"description\": f\"Created by {current_user} from gitmate.\"\n    }\n    remote_repo.create_repo()\n\n    # link both of them\n\n    # init on local\n\n    # push to remote\n\n\ncreate.add_command(repo)\n","repo_name":"Sayyed-Salman/gitmate-mvp","sub_path":"gitmate/commands/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"6353398329","text":"from functions.determinize import determinize\nfrom functions.equal import equal\nfrom functions.minimize import minimize\nfrom functions.thompson import Thompson\nfrom models import FiniteAutomaton, Regex\nfrom functions import registry\n\n\n@registry.register(registry.FunctionType.PREDICATE)\ndef equiv(self: FiniteAutomaton | Regex, other: FiniteAutomaton | Regex) -> bool:\n    if isinstance(self, FiniteAutomaton) and isinstance(other, FiniteAutomaton):\n        return __equiv_nfa(self, other)\n    elif isinstance(self, Regex) and isinstance(other, Regex):\n        self_nfa, other_nfa = Thompson(self), Thompson(other)\n        return __equiv_nfa(self_nfa, other_nfa)\n    else:\n        raise TypeError(\n            f'Expected arguments of types (Regex, Regex) or (FiniteAutomaton, FiniteAutomaton), '\n            f'got: {type(self)} and {type(other)}'\n        )\n\n\ndef __equiv_nfa(self: FiniteAutomaton, other: FiniteAutomaton) -> bool:\n    self_min_dfa = minimize(determinize(self))\n    other_min_dfa = minimize(determinize(other))\n    return equal(self_min_dfa, other_min_dfa)\n","repo_name":"Postlog/FLT-Labs","sub_path":"lab2-command/functions/equiv.py","file_name":"equiv.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
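The equiv record above reduces language equivalence to minimize(determinize(...)) plus a structural equality check, but it depends on project-local classes. As a standalone restatement of the same fact (two DFAs are equivalent iff no reachable pair of product states disagrees on acceptance), here is a dict-based sketch; every name in it is hypothetical.

from collections import deque

def dfa_equivalent(start1, start2, trans1, trans2, accept1, accept2, alphabet):
    # breadth-first search over the product of two complete DFAs
    seen = {(start1, start2)}
    queue = deque(seen)
    while queue:
        s1, s2 = queue.popleft()
        if (s1 in accept1) != (s2 in accept2):
            return False  # one machine accepts here, the other rejects
        for a in alphabet:
            nxt = (trans1[(s1, a)], trans2[(s2, a)])
            if nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
    return True

# both machines accept strings over {0, 1} containing an even number of 1s
t1 = {("e", "0"): "e", ("e", "1"): "o", ("o", "0"): "o", ("o", "1"): "e"}
t2 = {(0, "0"): 0, (0, "1"): 1, (1, "0"): 1, (1, "1"): 0}
assert dfa_equivalent("e", 0, t1, t2, {"e"}, {0}, ("0", "1"))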
+{"seq_id":"9146589595","text":"import numpy as np\nfrom PIL import Image\nimport cv2 as cv\nimport torch\nfrom .motion_processing import process_motion_to_latents, save_motion\n\n\ndef save(trail, output_path, visualizations=None, median=None):\n    \"\"\"\n    Computes the flow from the last 4 images in the stack\n    :param trail: [>=10, H, W, 3] array\n    :param output_path: a path to the numpy data file to write\n    :param visualizations: List[ProcessingVisualization] for visualizations\n    :param median: Optional[nd.array<128, 128>] if the median is known from other sources than the trail\n    \"\"\"\n    if median is None:\n        median = np.median(trail[:-4], axis=0)\n\n    for i, frame in enumerate(trail[-4:]):\n        save_frame(frame, median, output_path.format(i), visualizations=visualizations)\n\n\ndef save_frame(frame, median, output_path, visualizations=None):\n    \"\"\"\n    Computes the flow from frame2 to frame1.\n    This inverse order is used so that the vectors are high at the spot in frame2 where the moving object is,\n    i.e. inverse movement, to map more directly to z_pres. Saved object is [H, W] in [0, 1]\n    :param frame: [H, W, 3] array\n    :param median: [H, W, 3] array describing the median\n    :param output_path: a path to the numpy data file to write\n    :param visualizations: List[ProcessingVisualization] for visualizations\n    \"\"\"\n    median_delta = np.abs(frame - median)\n    median_delta = np.max(median_delta, axis=-1)\n    delta_max = median_delta.max()\n    median_delta = median_delta / delta_max if delta_max > 0 else median_delta\n    save_motion(frame, median_delta, output_path)\n    for vis in visualizations or []:  # guard against the None default\n        vis.save_vis(frame, median_delta)\n","repo_name":"k4ntz/MOC","sub_path":"src/motion/median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
+{"seq_id":"22323315098","text":"#!/usr/bin/python3\nimport re\nimport copy\nimport emoji\nimport discord\nimport logging\nimport pymongo\nimport configparser\nfrom discord.ext import commands\nfrom template import GUILD_TEMPLATE\n\n\n\nclass Role(commands.Cog):\n\n\tdef __init__(self, bot: commands.Bot, db: pymongo.database.Database):\n\t\tself.bot = bot\n\t\tself.db = db\n\n\t\tself.default_emoji_role_pattern = re.compile(\" <@&([\\d]+)> .\")\n\t\tself.custom_emoji_role_pattern = re.compile(\" <@&([\\d]+)> (<:[a-z_]+:[\\d]+>)\")\n\n\n\tdef split(self, string, splitter = \"  \"):  # arguments are separated by double spaces\n\t\tif splitter not in string:\n\t\t\treturn None\n\n\t\tsplit_arr = string[string.find(\" \")+len(splitter):].split(splitter)\n\t\tif len(split_arr) == 1 and split_arr[0] == \"\":\n\t\t\treturn None\n\n\t\treturn split_arr\n\n\n\n\t@commands.command(pass_context=True, help=\"create a role and assign to the user\")\n\t@commands.has_permissions(manage_roles=True)\n\tasync def roleCreate(self, ctx: commands.Context):\n\n\t\targs = self.split(ctx.message.content)\n\t\tif not args or len(args) > 2:\n\t\t\tawait ctx.send(\"Invalid parameters. Please specify the role name (and color) separated by **double spaces**. Color should be in the form of \\\"(r, g, b)\\\" if you want to specify the color.\\nExample: \\n\\t\\\"roleCreate role_name\\\"\\n\\t\\\"roleCreate role_name color\\\"\")\n\t\t\treturn\n\n\t\trole_name = args[0]\n\n\t\troles = await ctx.guild.fetch_roles()\n\t\tif role_name in [role.name for role in roles]:\n\t\t\tawait ctx.send(\"The role name already exists. Please choose another name.\")\n\t\t\treturn\n\n\t\t# Setting color\n\t\tif len(args) == 2:\n\t\t\ttry:\n\t\t\t\t# from_rgb takes the three components separately, so unpack the parsed list\n\t\t\t\tcolor = discord.Colour.from_rgb(*[int(v) for v in args[1][1:-1].split(\",\")])\n\t\t\texcept:\n\t\t\t\tawait ctx.send(\"Invalid color. Please specify the role name (and color) separated by **double spaces**. 
Color should be in the form of \\\"(r, g, b)\\\" if you want to specify the color.\\nExample: \\\"roleCreate role_name\\\"\\n\\\"roleAdd role_name color\\\"\")\n\t\t\t\treturn\n\n\t\telse:\n\t\t\tcolor = discord.Colour.random()\n\t\t\t\n\n\t\t# Create the role and assign the author to this role\n\t\trole = await ctx.guild.create_role(name=role_name, color=color)\n\t\tawait ctx.author.add_roles(role)\n\n\n\n\t@commands.command(pass_context=True, help=\"assign a role to the user\")\n\t@commands.has_permissions(manage_roles=True)\n\tasync def roleAdd(self, ctx: commands.Context):\n\t\targs = self.split(ctx.message.content)\n\n\t\tif not args or len(args) > 1:\n\t\t\tawait ctx.send(\"Invalid parameters. Please specify the role name (and color) separated by **double spaces**. Color should be in the form of \\\"(r, g, b)\\\" if you want to specify the color.\\nExample: \\n\\t\\\"roleAdd role_name\\\"\")\n\t\t\treturn\n\n\t\troles = await ctx.guild.fetch_roles()\n\t\tif args[0] not in [role.name for role in roles]:\n\t\t\tawait ctx.send(\"No such role. Please specify an existing role.\")\n\t\t\treturn\n\n\t\tawait ctx.author.add_roles(discord.utils.get(ctx.guild.roles, name=args[0]))\n\n\n\n\n\t@commands.command(pass_context=True, help=\"bind a role with emoji\")\n\t@commands.has_permissions(manage_roles=True)\n\tasync def roleBind(self, ctx: commands.Context):\n \n\n\t\tdef getEmoji(s):\n\t\t\temo = [c for c in s if c in emoji.UNICODE_EMOJI[\"en\"]]\n\t\t\tif emo:\n\t\t\t\treturn emo[0]\n\t\t\telse:\n\t\t\t\treturn None\n\n\t\t# Retrieve content\n\t\tcontent = ctx.message.content\n\t\temo = getEmoji(content)\n\t\t\n\n\t\t# Extract role and emoji\n\t\tsuccess_match = False\n\t\tde_content = emoji.demojize(ctx.message.content)\n\n\t\tdefault_result = self.default_emoji_role_pattern.search(de_content)\n\t\tif default_result and emo:\n\t\t\trole_id, emoji_to_bind = default_result.group(1), emo\n\t\t\tsuccess_match = True\n\t\t\n\t\tcustom_result = self.custom_emoji_role_pattern.search(content)\n\t\tif custom_result:\n\t\t\trole_id, emoji_to_bind = custom_result.group(1), custom_result.group(2)\n\t\t\tsuccess_match = True\n\n\t\t# Further check if role is legal\n\t\tif success_match:\n\t\t\troles = await ctx.guild.fetch_roles()\n\t\t\tif role_id not in [str(x.id) for x in roles]:\n\t\t\t\tsuccess_match = False\n\n\t\t# No patterns found or role invalid\n\t\tif not success_match:\n\t\t\tawait ctx.send(\"Please follow the syntax to bind emoji to the role.\\n\\\n\t\t\t\t\t\t\t\\t=roleBind role_to_bind emoji_to_represent\")\n\t\t\treturn\n\n\n\t\t# Bind role and emoji, save into database\n\t\tquery_result = self.db[\"guild_info\"].find_one({\"guild_id\": ctx.guild.id})\n\n\t\tif not query_result:\n\n\t\t\tdata_entry = copy.deepcopy(GUILD_TEMPLATE)\n\t\t\tdata_entry[\"guild_id\"] = ctx.guild.id\n\t\t\tdata_entry[\"roles\"][role_id] = emoji_to_bind\n\t\t\tself.db[\"guild_info\"].insert_one(data_entry)\n\n\t\telse:\n\n\t\t\tif role_id in query_result[\"roles\"].keys():\n\t\t\t\tawait ctx.send(\"This role already has its emoji.\")\n\t\t\telse:\n\t\t\t\tself.db[\"guild_info\"].update_one(query_result, {\"$set\": {\n\t\t\t\t\t\"roles.\"+role_id: emoji_to_bind\n\t\t\t\t}})\n\n\n\n\t@commands.command(pass_context=True, help=\"role reactive message\")\n\t@commands.has_permissions(manage_roles=True)\n\tasync def roleReact(self, ctx: commands.Context):\n\t\tquery_result = self.db[\"guild_info\"].find_one({\"guild_id\": ctx.guild.id})\n\t\tif not query_result:\n\t\t\tawait ctx.send(\"No roles 
available.\")\n\t\t\treturn\n\n\t\tmsg = await ctx.send(\"Select your roles to bind\")\n\t\tself.db[\"guild_info\"].update_one(query_result, {\"$set\": {\n\t\t\t\t\t\"role_react_id\": msg.id\n\t\t\t\t}})\n\n\t\tfor emo in query_result[\"roles\"].values():\n\t\t\tawait msg.add_reaction(emo)\n\n\n\n\t@commands.Cog.listener()\n\tasync def on_raw_reaction_add(self, reaction_payload):\n\t\ttry:\n\t\t\tuser = await self.bot.fetch_user(reaction_payload.user_id)\n\t\t\tif user == self.bot.user:\n\t\t\t\treturn\n\n\t\t\tguild = await self.bot.fetch_guild(reaction_payload.guild_id)\n\t\t\tchannel = await self.bot.fetch_channel(reaction_payload.channel_id)\n\t\t\tmessage = await channel.fetch_message(reaction_payload.message_id)\n\t\t\temo = str(reaction_payload.emoji)\n\n\n\t\t\tquery_result = self.db[\"guild_info\"].find_one({\"guild_id\": guild.id})\n\t\t\tif not query_result:\n\t\t\t\treturn\n\n\t\t\tself.bot.fetch_offline_members = True\n\t\t\tmember = await guild.query_members(user_ids=[user.id])[0]\n\n\t\t\tif query_result:\n\n\t\t\t\tif message.id == query_result[\"role_react_id\"]:\n\t\t\t\t\tfor role_id, e in query_result[\"roles\"].items():\n\t\t\t\t\t\tif e == emo:\n\t\t\t\t\t\t\tawait member.add_roles(discord.utils.get(guild.roles, id=int(role_id)))\n\t\t\t\t\t\t\treturn\n\t\texcept:\n\t\t\tpass","repo_name":"Kevincj/DiscordMule","sub_path":"role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42289592618","text":"__author__ = 'bmiller'\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.parsers.rst import Directive\nimport json\nimport os\n\n# try:\n# import conf\n# version = conf.version\n# staticserver = conf.staticserver\n# except:\n# version = '2.1.0'\n# staticserver = 'runestonestatic.appspot.com'\n\ndef setup(app):\n app.add_directive('activecode',ActiveCode)\n\n# \t \n# \t\n# \t \n# \t \n# \t \n# \t \n# \t \n# \t \n# \t\n\n\n# If you want to play with the codemirror files, uncomment \n# [codemirror.js, python.js, matchbrackets.js, and active-line.js]\n# and comment out [pywindowCodemirrorC.js].\n\n app.add_stylesheet('codemirrorEdited.css')\n app.add_javascript('http://ajax.googleapis.com/ajax/libs/jquery/1.9.0/jquery.min.js')\n# app.add_javascript('codemirror.js' )\n# app.add_javascript('python.js' )\n# app.add_javascript('matchbrackets.js')\n# app.add_javascript('active-line.js')\n app.add_javascript('pywindowCodemirrorC.js' )\n app.add_javascript('skulpt.min.js' )\n app.add_javascript('skulpt-stdlib.js')\n app.add_javascript('aopsmods.js')\n\n app.add_node(ActivcodeNode, html=(visit_ac_node, depart_ac_node))\n\n app.connect('doctree-resolved',process_activcode_nodes)\n app.connect('env-purge-doc', purge_activecodes)\n\n\n\nSTART = '''\n
\n'''\n\n# had cols=\"%(cols)d\" in the textarea spec\nEDIT1 = '''\n
\n\n
\n\n'''\n\nEDITRUN = '''\n
\n\n\n\n
\n
\n'''\n\nEDITERROR = '''\n
\n'''\n\nUNHIDE='''\n\n'''\n\nCANVAS = '''\n
\n\n
\n'''\n\nSUFF = '''
%(suffix)s
'''\n\nPRE = '''\n
\n\n
\n\n
\n'''\n\nEND = '''\n
\n\n'''\n\nAUTO = '''\n\n'''\n\nLATEX = '''\n\\\\begin{verbatim}\n%(initialcode)s\n\\\\end{verbatim}\n'''\n\nclass ActivcodeNode(nodes.General, nodes.Element):\n def __init__(self,content):\n \"\"\"\n\n Arguments:\n - `self`:\n - `content`:\n \"\"\"\n super(ActivcodeNode,self).__init__()\n self.ac_components = content\n\n# self for these functions is an instance of the writer class. For example\n# in html, self is sphinx.writers.html.SmartyPantsHTMLTranslator\n# The node that is passed as a parameter is an instance of our node class.\ndef visit_ac_node(self,node):\n #print self.settings.env.activecodecounter\n res = START\n if 'above' in node.ac_components:\n res += CANVAS\n res += EDIT1\n if 'norun' not in node.ac_components:\n res += EDITRUN\n res += EDITERROR \n if 'above' not in node.ac_components:\n if 'nocanvas' not in node.ac_components and 'norun' not in node.ac_components:\n res += CANVAS\n if 'hidecode' not in node.ac_components:\n node.ac_components['hidecode'] = 'block'\n if node.ac_components['hidecode'] == 'none':\n res += UNHIDE\n if 'suffix' in node.ac_components:\n res += SUFF\n if 'nopre' not in node.ac_components and 'norun' not in node.ac_components:\n res += PRE\n if 'autorun' in node.ac_components:\n res += AUTO\n res += END\n res = res % node.ac_components\n res = res.replace(\"u'\",\"'\") # hack: there must be a better way to include the list and avoid unicode strings\n\n self.body.append(res)\n\ndef visit_ac_node_latex(self,node):\n res = LATEX % node.ac_components\n self.body.append(res)\n\ndef depart_ac_node(self,node):\n ''' This is called at the start of processing an activecode node. If activecode had recursive nodes\n etc and did not want to do all of the processing in visit_ac_node any finishing touches could be\n added here.\n '''\n pass\n\n\ndef process_activcode_nodes(app,env,docname):\n pass\n\n\ndef purge_activecodes(app,env,docname):\n pass\n\n\nclass ActiveCode(Directive):\n required_arguments = 1\n optional_arguments = 1\n has_content = True\n option_spec = {\n 'nocanvas':directives.flag,\n 'nopre':directives.flag,\n 'above':directives.flag, # put the canvas above the code\n 'autorun':directives.flag,\n 'norun':directives.flag,\n 'caption':directives.unchanged,\n 'include':directives.unchanged,\n 'hidecode':directives.flag,\n 'nolinenums':directives.flag,\n 'tour_1':directives.unchanged,\n 'tour_2':directives.unchanged,\n 'tour_3':directives.unchanged,\n 'tour_4':directives.unchanged,\n 'tour_5':directives.unchanged,\n 'rows':directives.positive_int,\n 'cols':directives.positive_int,\n 'cheight':directives.positive_int,\n 'cwidth':directives.positive_int\n }\n\n def run(self):\n env = self.state.document.settings.env\n # keep track of how many activecodes we have.... 
could be used to automatically make a unique id for them.\n if not hasattr(env,'activecodecounter'):\n env.activecodecounter = 0\n env.activecodecounter += 1\n\n self.options['divid'] = self.arguments[0]\n\n# if 'cols' not in self.options:\n# self.options['cols'] = min(65,max([len(x) for x in self.content]))\n if 'rows' not in self.options:\n self.options['rows'] = len(self.content)\n if 'cheight' not in self.options:\n self.options['cheight'] = 400\n if 'cwidth' not in self.options:\n self.options['cwidth'] = 400\n\n if 'nolinenums' in self.options:\n self.options['linenumflag'] = 'false'\n else: \n self.options['linenumflag'] = 'true'\n if 'norun' in self.options:\n self.options['readonlyflag'] = 'true'\n else:\n self.options['readonlyflag'] = 'false'\n if self.content:\n if '====' in self.content:\n idx = self.content.index('====')\n source = \"\\n\".join(self.content[:idx])\n suffix = \"\\n\".join(self.content[idx+1:])\n else:\n source = \"\\n\".join(self.content)\n suffix = \"\\n\"\n else:\n source = '\\n'\n suffix = '\\n'\n\n self.options['initialcode'] = source\n self.options['suffix'] = suffix\n str=source.replace(\"\\n\",\"*nline*\")\n str0=str.replace(\"\\\"\",\"*doubleq*\")\n str1=str0.replace(\"(\",\"*open*\")\n str2=str1.replace(\")\",\"*close*\")\n str3=str2.replace(\"'\",\"*singleq*\")\n self.options['argu']=str3\n\n complete=\"\"\n no_of_buttons=0\n okeys = self.options.keys()\n for k in okeys:\n if '_' in k:\n x,label = k.split('_')\n no_of_buttons=no_of_buttons+1\n complete=complete+self.options[k]+\"*atype*\"\n\n newcomplete=complete.replace(\"\\\"\",\"*doubleq*\")\n self.options['ctext'] = newcomplete\n self.options['no_of_buttons'] = no_of_buttons\n\n if 'caption' not in self.options:\n self.options['caption'] = ''\n\n if 'include' not in self.options:\n self.options['include'] = 'undefined'\n else:\n lst = self.options['include'].split(',')\n lst = [x.strip() for x in lst]\n self.options['include'] = lst\n\n if 'hidecode' in self.options:\n self.options['hidecode'] = 'none'\n else:\n self.options['hidecode'] = 'block'\n\n return [ActivcodeNode(self.options)]\n\n\nclass ActiveExercise(ActiveCode):\n required_arguments = 1\n optional_arguments = 0\n has_content = True\n\n def run(self):\n self.options['hidecode'] = True\n return super(ActiveExercise,self).run()\n\n\nif __name__ == '__main__':\n a = ActiveCode()\n","repo_name":"artofproblemsolving/pythonbook","sub_path":"_static/activecode.py","file_name":"activecode.py","file_ext":"py","file_size_in_byte":9554,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"19380772431","text":"import tkinter as tk\nimport shlex\nimport subprocess\nimport time\nfrom tkinter import simpledialog\nfrom tkinter import messagebox as mbox\nimport ctimer.ctimer_db as db\n\n\nclass CtimerClockViewBase:\n def get_button_start_pause(self):\n \"\"\"\n Abstract method of updating the button provided by GUI framework\n \"\"\"\n raise NotImplementedError\n\n def get_button_stop(self):\n \"\"\"\n Abstract method of updating the button provided by GUI framework\n \"\"\"\n raise NotImplementedError\n\n def update_label_goal_show(self):\n \"\"\"\n Abstract method of updating the label provided by GUI framework\n \"\"\"\n raise NotImplementedError\n\n def update_label_total_clock_counts(self):\n \"\"\"\n Abstract method of updating the label provided by GUI framework\n \"\"\"\n raise NotImplementedError\n\n def update_label_total_clock_aim(self):\n \"\"\"\n Abstract method of updating the 
label provided by GUI framework\n        \"\"\"\n        raise NotImplementedError\n\n    def config_label_date(self, text=\"NA\"):\n        \"\"\"\n        Abstract method to config the date label provided by GUI framework\n        \"\"\"\n        raise NotImplementedError\n\n    def update_label_display(self):\n        \"\"\"\n        Abstract method of updating the label provided by GUI framework\n        \"\"\"\n        raise NotImplementedError\n\n    def set_bring_to_front(self):\n        \"\"\"\n        Abstract method of setting bring_to_front flag\n        \"\"\"\n        raise NotImplementedError\n\n    def set_not_bring_to_front(self):\n        \"\"\"\n        Abstract method of setting not_bring_to_front flag\n        \"\"\"\n        raise NotImplementedError\n\n    def create_widgets(self):\n        \"\"\"\n        Abstract method of creating the widgets of the GUI window\n        \"\"\"\n        raise NotImplementedError\n\n    def show_clock_count(self, total_clock_counts: str) -> None:\n        \"\"\"\n        Abstract method of showing total_clock_counts with labels\n\n        :param total_clock_counts: the total clock counts you want to show\n        \"\"\"\n        raise NotImplementedError\n\n    def show_pause_button(self):\n        \"\"\"\n        Abstract method to render the pause button\n\n        :return: None\n        \"\"\"\n        raise NotImplementedError\n\n    def show_start_button(self):\n        \"\"\"\n        Abstract method to render the start button\n        :return: None\n        \"\"\"\n        raise NotImplementedError\n\n    def countdown_display(self, text, is_break):\n        \"\"\"\n        Configure the count down timer display\n\n        :param text: text we want to display\n        :param is_break: if the counting status is in the break slot\n        :return: None\n        \"\"\"\n        raise NotImplementedError\n\n    def ask_reached_goal_reason(self):\n        \"\"\"\n        Ask users about the reason if they reached their goals\n\n        Ask users via GUI, return a tuple (boolean yes/no, reason in string text)\n        :return: tuple\n        \"\"\"\n        raise NotImplementedError\n\n    def playback_voice_message(self, message_type):\n        \"\"\"\n        Playback voice message according to message types\n\n        By default the message is played by the say tool\n\n        :param message_type: message type in string\n        :return: None\n        \"\"\"\n        raise NotImplementedError\n\n    def toggle_start_pause(self):\n        \"\"\"\n        Toggle between the start and pause states of the clock\n\n        :return: None\n        \"\"\"\n        raise NotImplementedError\n\n    def terminate(self):\n        \"\"\"\n        Prematurely terminate clocks\n\n        Apply the corresponding handling when terminating clocks prematurely. This method is usually used when\n        interrupting a ticking clock.\n\n        :return: None\n        \"\"\"\n        raise NotImplementedError\n\n    def flash_window(self, flashing_seconds=5):\n        \"\"\"\n        Flash the window\n\n        :param flashing_seconds: how long the window will be flashing\n        :return:\n        \"\"\"\n        raise NotImplementedError\n\n\nclass CtimerClockView(tk.Frame, CtimerClockViewBase):\n    def __init__(self, timer_model, master):\n        super().__init__(master)\n\n        labels = [\n            tk.Label(self, text=\"\", height=2),\n            tk.Label(self, height=1, width=15, textvariable=\"\"),\n            tk.Label(self, height=1, width=10, textvariable=\"\"),\n            tk.Label(self, height=1, width=10, textvariable=\"\"),\n            tk.Label(self, height=3, width=10, font=(\"Arial\", 30), textvariable=\"\"),\n            tk.Label(self, height=1, width=25, textvariable=\"\"),\n        ]\n\n        buttons = [\n            tk.Button(\n                self,\n                text=\"Start\",\n                fg=\"Green\",\n                width=8,\n                height=4,\n                command=self.toggle_start_pause,\n            ),\n            tk.Button(\n                self,\n                fg=\"Dark Red\",\n                activebackground=\"Dark Red\",\n                text=\"Stop\",\n                width=8,\n                height=4,\n                command=self.terminate,\n            ),\n        ]\n\n        self.master = master\n        self.tm = timer_model\n        master.title(self.tm.title)\n        if self.tm.debug:\n            self.playback_voice_message(\"Welcome_debug\")\n        else:\n            self.playback_voice_message(\"Welcome\")\n        self.data = self.tm.data\n\n        self._button_start_pause = buttons[0]\n        self._button_stop = buttons[1]\n\n        self._label_goal_show = labels[0]\n        self._label_total_clock_counts = labels[1]\n        self._label_total_clock_aim = labels[2]\n        self._label_date = labels[3]\n        self._label_display = labels[4]\n        self._label_latest_pomodoro_completed_timestamp = labels[5]\n        self.create_widgets()\n        self._label_total_clock_counts.config(\n            text=f\"Done: {self.tm.clock_details.clock_count}\"\n        )\n        self._label_latest_pomodoro_completed_timestamp.config(\n            text=f\"Completed at: unknown yet\"\n        )\n\n    def get_button_start_pause(self):\n        return self._button_start_pause\n\n    def config_label_date(self, text=\"NA\"):\n        self._label_date.config(text=text)\n\n    def set_bring_to_front(self):\n        self.master.attributes(\"-topmost\", 1)\n\n    def set_not_bring_to_front(self):\n        self.master.attributes(\"-topmost\", 0)\n\n    def create_widgets(self):\n        self.countdown_display(\"Click start!\", self.tm.clock_details.is_break)\n        # self._label_display.config(text=self.set_time_print)\n        self._label_date.config(text=self.tm.clock_details.date)\n        self._label_total_clock_aim.config(text=f\"Aim: {self.data.aim_clock_count}\")\n        self._label_total_clock_counts.config(text=f\"Done: ...Loading...\")\n        self._label_latest_pomodoro_completed_timestamp.config(text=f\"Latest Pomodoro Completed Timestamp\")\n        self._label_date.grid(row=0, column=0, columnspan=2)\n        self._label_total_clock_aim.grid(row=1, column=0, columnspan=2)\n        self._label_total_clock_counts.grid(row=2, column=0, columnspan=2)\n        self._label_display.grid(row=3, column=0, columnspan=2)\n        self._button_start_pause.grid(row=5, column=0)\n        self._button_stop.grid(row=5, column=1)\n        self._label_goal_show.grid(row=6, column=0, columnspan=2)\n        self._label_latest_pomodoro_completed_timestamp.grid(row=7, column=0, columnspan=2)\n\n        self.pack()\n\n    def show_clock_count(self, total_clock_counts):\n        self._label_total_clock_counts.config(\n            text=f\"Total clocks: {total_clock_counts}\"\n        )\n\n    def show_latest_pomodoro_completed_timestamp(self, latest_pomodoro_completed_timestamp):\n        self._label_latest_pomodoro_completed_timestamp.config(\n            text=f\"Completed at: {latest_pomodoro_completed_timestamp}\"\n        )\n\n    def show_pause_button(self):\n        
self._button_start_pause[\"text\"] = \"Pause\"\n self._button_start_pause[\"fg\"] = \"Red\"\n\n def show_start_button(self):\n self._button_start_pause[\"text\"] = \"Start\"\n self._button_start_pause[\"fg\"] = \"Green\"\n self._label_display[\"fg\"] = \"Black\"\n\n def countdown_display(self, text, is_break):\n if is_break:\n self._label_display[\"fg\"] = \"Green\"\n else:\n self._label_display[\"fg\"] = \"Black\"\n self._label_display.config(text=text)\n\n def get_goal(self):\n # TODO: get all goals for all clocks for the day\n self.tm.clock_details.task_description = simpledialog.askstring(\n title=\"Set your goals\", prompt=\"What's your goal for this clock:\"\n )\n self._label_goal_show[\n \"text\"\n ] = f\"Goal: {self.tm.clock_details.task_description}\"\n\n def ask_reached_goal_reason(self):\n reason = \"N.A.\"\n reached_bool = mbox.askyesno(\n \"Goal reached?\", \"Did you reach your goal?\", parent=self\n )\n\n if reached_bool is False:\n reason = simpledialog.askstring(\n \"Goal reached description\",\n \"What happened? \"\n \"What was a suprise? \\n\"\n \"What needs to modify to \"\n \"have a realistic goal? \",\n parent=self,\n )\n else:\n reason = simpledialog.askstring(\n \"Goal reached description\",\n \"Congratulations! \"\n \"How did you make it?\",\n parent=self,\n )\n\n return reached_bool, reason\n\n def playback_voice_message(self, message_type):\n message = \"\"\n if not self.tm.silence:\n if message_type == \"done\":\n if self.tm.clock_details.clock_count == 1:\n message = (\n f\"Beebeebeebee beebee. Done. You have achieved 1 clock today. \"\n f\"Did you reach your goal?\"\n )\n elif (\n self.tm.clock_details.clock_count % self.data.long_break_clock_count\n == 0\n ):\n message = (\n f\"Beebeebeebee. Hooray. You achieved {self.tm.clock_details.clock_count} clocks \"\n f\"already. \"\n f\"Did you finish your goal?.\"\n )\n else:\n message = (\n f\"Beebeebeebee beebee. Done. You have achieved {self.tm.clock_details.clock_count} \"\n f\"clocks today. Did you reach your goal?\"\n )\n elif message_type == \"start\":\n # TODO: if starting a new clock, new message: ready? set your goal\n message = \"ready? Start.\"\n elif message_type == \"continue\":\n message = \"continue\"\n elif message_type == \"pause\":\n message = \"Pause\"\n elif message_type == \"stop\":\n message = \"Stop and recharge\"\n elif message_type == \"break_over\":\n message = \"Times up. Click start to start a new clock!\"\n elif message_type == \"enjoy\":\n message = \"Thanks! Enjoy your break!\"\n elif message_type == \"enjoy_long\":\n message = \"Thanks! 
Enjoy your long break!\"\n elif message_type == \"Welcome\":\n message = \"Welcome.\"\n elif message_type == \"Welcome_debug\":\n message = \"Welcome to debug mode.\"\n command = shlex.split(f\"say {message}\")\n subprocess.run(command)\n\n def toggle_start_pause(self):\n # is paused: start clock\n if not self.tm.clock_ticking:\n if self.tm.fresh_new:\n self.playback_voice_message(\"start\")\n self.get_goal()\n self.config_label_date(text=self.tm.clock_details.date)\n self.tm.clock_details.start_clock = time.time()\n self.tm.fresh_new = False\n else:\n self.playback_voice_message(\"continue\")\n\n self.tm.clock_details.start_clock = time.time()\n self.show_pause_button()\n self.tm.clock_ticking = True\n # is ticking: pause clock\n else:\n self.playback_voice_message(\"pause\")\n self.tm.clock_details.pause_toggled = True\n self.tm.clock_details.end_clock = time.time()\n self.tm.clock_details.reached_bool, self.tm.clock_details.reason = False, 0\n db.db_add_clock_details(self.tm.db_file, self.tm.clock_details)\n self.show_start_button()\n self.tm.clock_ticking = False\n\n def terminate(self):\n self.playback_voice_message(\"stop\")\n if not self.tm.clock_details.is_break and not self.tm.fresh_new:\n self.tm.clock_details.reached_bool, self.tm.clock_details.reason = self.ask_reached_goal_reason()\n if self.tm.clock_details.reached_bool:\n self.tm.clock_details.clock_count += 1\n self.countdown_display(\"Done!\", self.tm.clock_details.is_break)\n self.show_clock_count(self.tm.clock_details.clock_count)\n # only if goal is reached, clock count is +1, we want to ask if it is a complete (no break) clock\n self.tm.check_complete()\n # Write to clock details even if it is terminated. (goal not reached)\n timestamp = time.time()\n self.tm.clock_details.end_clock = timestamp\n db.db_add_clock_details(self.tm.db_file, self.tm.clock_details)\n # set to new status\n self.tm.clock_details.is_break = False\n self.tm.clock_ticking = False\n self.tm.clock_details.get_new_clock_entry()\n self.tm.remaining_time = self.tm.set_time\n self.tm.fresh_new = True\n self.countdown_display(\"Click start!\", self.tm.clock_details.is_break)\n self.show_start_button()\n\n\n def flash_window(self, flashing_seconds=5):\n # check flashing_button.py\n pass\n\n\nclass CtimerClockFakeView(CtimerClockView):\n def __init__(self, timer_model, master):\n self.tm = timer_model\n # master won't be used in a fake view as it is a part of GUI framework\n self.master = master\n # GUI labels and buttons from top to bottom.\n self._label_date = None\n self._label_total_clock_aim = None\n self._label_total_clock_counts = None\n self._label_display = None\n self._button_start_pause = \"Start\"\n self._button_stop = \"Stop\"\n self._label_goal_show = None\n\n def config_label_date(self, text=\"NA\"):\n self._label_date = text\n self._show_gui_window_response(f\"Date label : {text}\")\n\n def _show_gui_window_response(self, msg):\n print(f\"\\n[GUI response] {msg}\")\n\n def _show_gui_start_pause_button(self, msg):\n self._show_gui_window_response(f\"start/pause button: {msg}\")\n\n @staticmethod\n def _get_fake_goal():\n return \"[get goal description]\"\n\n def set_bring_to_front(self):\n self._show_gui_window_response(\"is set as 'always on the top'.\")\n\n def set_not_bring_to_front(self):\n self._show_gui_window_response(\"is disabled 'always on the top'.\")\n\n def create_widgets(self):\n self.countdown_display(\"Click start!\", self.tm.clock_details.is_break)\n self._label_date = self.tm.clock_details.date\n 
self._label_total_clock_aim = f\"Aim: {self.data.aim_clock_count}\"\n self._label_total_clock_counts = f\"Done: ...Loading...\"\n self._show_gui_window_response(\"Widgets are created and arranged by the layout.\")\n\n\n def show_clock_count(self, total_clock_counts):\n self._show_gui_window_response(f\"total_clock_counts: {total_clock_counts}\")\n self._label_total_clock_counts = total_clock_counts\n\n def show_pause_button(self):\n self._show_gui_start_pause_button(\"text: pause, fg: red\")\n self._button_start_pause = \"Pause\"\n\n def show_start_button(self):\n self._show_gui_start_pause_button(\"text: start, fg: green\")\n self._button_start_pause = \"Start\"\n self._show_gui_window_response(\"fg: black\")\n\n\n def countdown_display(self, text, is_break):\n if is_break:\n self._show_gui_window_response(\"fg: green\")\n else:\n self._show_gui_window_response(\"fg: black\")\n self._show_gui_window_response(text)\n self._label_display = text\n\n def get_goal(self):\n self.tm.clock_details.task_description = self._get_fake_goal()\n self._show_gui_window_response(f\"text: Goal: {self.tm.clock_details.task_description}\")\n self._label_goal_show = self.tm.clock_details.task_description\n def ask_reached_goal_reason(self):\n return True, \"fake reached goal reasons\"\n\n def playback_voice_message(self, message_type):\n self._show_gui_window_response(f\"Voice: {message_type}\")\n","repo_name":"zztin/ctimer","sub_path":"ctimer/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":16801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4509545983","text":"from django.db import models\n\n# Create your models here.\n\nclass Investor(models.Model):\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n username = models.CharField(max_length=100, null=True)\n email = models.EmailField(max_length=100, null=True)\n date_created = models.DateTimeField(auto_now_add=True, null=True)\n\n def __str__(self):\n return self.username \n\n\nclass Investment(models.Model):\n INV_TYPE = (\n ('Long Term', 'Long Term'),\n ('Short Term', 'Short Term'),\n )\n PAYOUTS = (\n ('Monthly', 'Monthly'),\n ('Quarterly', 'Quarterly'),\n ('Half Yearly', 'Half Yearly'),\n ('Annualy', 'Annualy'),\n )\n inv_type = models.CharField(max_length=100, choices=INV_TYPE)\n inv_image = models.ImageField(upload_to = 'pics', blank=True)\n inv_per_unit = models.IntegerField(null=True)\n tenure = models.FloatField(null=True)\n payouts = models.CharField(max_length=100, null=True, choices=PAYOUTS)\n lock_in = models.IntegerField(null=True)\n return_per = models.IntegerField(null=True)\n avail_units = models.IntegerField(null=True)\n\n def __str__(self):\n return self.inv_type \n\n\nclass Order(models.Model):\n STATUS = (\n ('In Cart', 'In Cart'),\n ('Ordered', 'Ordered'),\n ('Matured', 'Matured'),\n )\n investor = models.ForeignKey(Investor, null=True, on_delete=models.SET_NULL)\n investment = models.ForeignKey(Investment, null=True, on_delete=models.SET_NULL)\n date_created = models.DateTimeField(auto_now_add=True, null=True)\n status = models.CharField(max_length=100, null=True, choices=STATUS) \n","repo_name":"Urohit/farm-code","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24818295470","text":"from tkinter import *\nfrom bruchrechner import 
*\n\n\nclass Bruchrechner_GUI:\n def __init__(self):\n window = Tk()\n window.geometry('210x200+800+300')\n window.title(\"Bruchrechner\")\n window.resizable(False, False)\n\n self.derRechner = Bruchrechner()\n\n def createLabel(t: str, posX: int):\n label = Label(window, text=t)\n label.place(x=posX, y=10)\n\n createLabel(\"Bruch 1\", 10)\n createLabel(\"Bruch 2\", 78)\n createLabel(\"Ergebnis\", 150)\n\n self.operatorText = StringVar()\n self.operatorText.set(\"+\")\n la4 = Label(window, textvariable=self.operatorText, font='{Arial} 14')\n la4.place(x=54, y=60)\n\n la5 = Label(window, text='=', font='{Arial} 14')\n la5.place(x=128, y=60)\n\n def createEntry(textvar: IntVar, posX: int, posY: int):\n entry = Entry(window, width=3, textvariable=textvar)\n entry.place(x=posX, y=posY, width=30)\n\n def createLabel(textvar: IntVar, posX: int, posY: int, Height: int):\n lab = Label(window, fg=\"black\", relief=\"raised\", textvariable=textvar)\n lab.place(x=posX, y=posY, width=30, height=Height)\n\n self.int1 = IntVar()\n createEntry(self.int1, 18, 50)\n self.int2 = IntVar()\n createEntry(self.int2, 18, 80)\n self.int3 = IntVar()\n createEntry(self.int3, 88, 50)\n self.int4 = IntVar()\n createEntry(self.int4, 88, 80)\n\n self.int5 = IntVar()\n createLabel(self.int5, 162, 50, 20)\n self.int6 = IntVar()\n createLabel(self.int6, 162, 80, 20)\n\n def createBtn(symbol: str, posX: int, cmd):\n btn = Button(window, text=symbol, font='{Arial} 13',\n command=cmd, bg=\"light green\")\n btn.place(x=posX, y=130)\n\n createBtn(\"+\", 18+6, self.addition)\n createBtn(\"-\", 65+6, self.subtraction)\n createBtn(\"*\", 110+6, self.multiplication)\n createBtn(\"/\", 155+6, self.division)\n\n def setAndGet(self):\n self.derRechner.setBruch1Zehler(self.int1.get())\n self.derRechner.setBruch1Nenner(self.int2.get())\n self.derRechner.setBruch2Zehler(self.int3.get())\n self.derRechner.setBruch2Nenner(self.int4.get())\n self.int5.set(self.derRechner.getErgebnisZehler())\n self.int6.set(self.derRechner.getErgebnisNenner())\n\n def addition(self):\n self.operatorText.set(\"+\")\n self.setAndGet()\n self.derRechner.addition()\n\n def subtraction(self):\n self.operatorText.set(\"-\")\n self.setAndGet()\n self.derRechner.subtraction()\n\n def multiplication(self):\n self.operatorText.set(\"*\")\n self.setAndGet()\n self.derRechner.multiplication()\n\n def division(self):\n self.operatorText.set(\"/\")\n self.setAndGet()\n self.derRechner.division()\n\n\nif __name__ == '__main__':\n Window = Bruchrechner_GUI()\n mainloop()\n","repo_name":"filip-h-999/Bruchrechner","sub_path":"Bruchrechner/bruchrechner_gui.py","file_name":"bruchrechner_gui.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73473601014","text":" \n#import all the required libraries\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom statistics import mode\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.stem import LancasterStemmer\n#nltk.download('wordnet')\n#nltk.download('stopwords')\n#nltk.download('punkt')\nfrom nltk.corpus import stopwords\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import models\nfrom tensorflow.keras import backend as K \nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer \nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.layers import Input,LSTM,Embedding,Dense,Concatenate,Attention\nfrom 
sklearn.model_selection import train_test_split\nfrom bs4 import BeautifulSoup\n\n#read the dataset file\ndf=pd.read_csv(\"AI_app/reviews.csv\",nrows=100000)\n#drop the duplicate and na values from the records\ndf.drop_duplicates(subset=['Text'],inplace=True)\ndf.dropna(axis=0,inplace=True)\ninput_data = df.loc[:,'Text']\ntarget_data = df.loc[:,'Summary']\ntarget_data.replace('', np.nan, inplace=True)\n\ninput_texts=[]\ntarget_texts=[]\ninput_words=[]\ntarget_words=[]\ncontractions= pickle.load(open(\"AI_app/contractions.pkl\",\"rb\"))['contractions']\n#initialize stop words and LancasterStemmer\nstop_words=set(stopwords.words('english'))\nstemm=LancasterStemmer()\n\ndef clean(texts,src):\n #remove the html tags\n texts = BeautifulSoup(texts, \"lxml\").text\n #tokenize the text into words \n words=word_tokenize(texts.lower())\n #filter words which contains \\ \n #integers or their length is less than or equal to 3\n words= list(filter(lambda w:(w.isalpha() and len(w)>=3),words))\n #contraction file to expand shortened words\n words= [contractions[w] if w in contractions else w for w in words ]\n #stem the words to their root word and filter stop words\n if src==\"inputs\":\n words= [stemm.stem(w) for w in words if w not in stop_words]\n else:\n words= [w for w in words if w not in stop_words]\n return words\n\n#pass the input records and taret records\nfor in_txt,tr_txt in zip(input_data,target_data):\n in_words= clean(in_txt,\"inputs\")\n input_texts+= [' '.join(in_words)]\n input_words+= in_words\n #add 'sos' at start and 'eos' at end of text\n tr_words= clean(\"sos \"+tr_txt+\" eos\",\"target\")\n target_texts+= [' '.join(tr_words)]\n target_words+= tr_words\n\n#store only unique words from input and target list of words\ninput_words = sorted(list(set(input_words)))\ntarget_words = sorted(list(set(target_words)))\nnum_in_words = len(input_words) #total number of input words\nnum_tr_words = len(target_words) #total number of target words\n\n#get the length of the input and target texts which appears most often \nmax_in_len = mode([len(i) for i in input_texts])\nmax_tr_len = mode([len(i) for i in target_texts])\n\n\n\n#split the input and target text into 80:20 ratio or testing size of 20%.\nx_train,x_test,y_train,y_test=train_test_split(input_texts,target_texts,test_size=0.2,random_state=0)\n\n\n#train the tokenizer with all the words\nin_tokenizer = Tokenizer()\nin_tokenizer.fit_on_texts(x_train)\ntr_tokenizer = Tokenizer()\ntr_tokenizer.fit_on_texts(y_train)\n\n# encoder inference\nlatent_dim=500\n#load the model\nmodel = models.load_model(\"AI_app/s2s\")\n\n#construct encoder model from the output of 6 layer i.e.last LSTM layer\nen_outputs,state_h_enc,state_c_enc = model.layers[6].output\nen_states=[state_h_enc,state_c_enc]\n#add input and state from the layer.\nen_model = Model(model.input[0],[en_outputs]+en_states)\n\n# decoder inference\n#create Input object for hidden and cell state for decoder\n#shape of layer with hidden or latent dimension\ndec_state_input_h = Input(shape=(latent_dim,))\ndec_state_input_c = Input(shape=(latent_dim,))\ndec_hidden_state_input = Input(shape=(max_in_len,latent_dim))\n\n# Get the embeddings and input layer from the model\ndec_inputs = model.input[1]\ndec_emb_layer = model.layers[5]\ndec_lstm = model.layers[7]\ndec_embedding= dec_emb_layer(dec_inputs)\n\n#add input and initialize LSTM layer with encoder LSTM states.\ndec_outputs2, state_h2, state_c2 = dec_lstm(dec_embedding, initial_state=[dec_state_input_h,dec_state_input_c])\n\n#Attention 
layer\nattention = model.layers[8]\nattn_out2 = attention([dec_outputs2,dec_hidden_state_input])\n\nmerge2 = Concatenate(axis=-1)([dec_outputs2, attn_out2])\n\n#Dense layer\ndec_dense = model.layers[10]\ndec_outputs2 = dec_dense(merge2)\n\n# Finally define the Model Class\ndec_model = Model(\n[dec_inputs] + [dec_hidden_state_input,dec_state_input_h,dec_state_input_c],\n[dec_outputs2] + [state_h2, state_c2])\n\n#create a dictionary with a key as index and value as words.\nreverse_target_word_index = tr_tokenizer.index_word\nreverse_source_word_index = in_tokenizer.index_word\ntarget_word_index = tr_tokenizer.word_index\nreverse_target_word_index[0]=' '\n\ndef decode_sequence(input_seq):\n #get the encoder output and states by passing the input sequence\n en_out, en_h, en_c= en_model.predict(input_seq)\n\n #target sequence with inital word as 'sos'\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = target_word_index['sos']\n\n #if the iteration reaches the end of text than it will be stop the iteration\n stop_condition = False\n #append every predicted word in decoded sentence\n decoded_sentence = \"\"\n while not stop_condition: \n #get predicted output, hidden and cell state.\n output_words, dec_h, dec_c= dec_model.predict([target_seq] + [en_out,en_h, en_c])\n \n #get the index and from the dictionary get the word for that index.\n word_index = np.argmax(output_words[0, -1, :])\n text_word = reverse_target_word_index[word_index]\n decoded_sentence += text_word +\" \"\n\n # Exit condition: either hit max length\n # or find a stop word or last word.\n if text_word == \"eos\" or len(decoded_sentence) > max_tr_len:\n stop_condition = True\n \n #update target sequence to the current word index.\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = word_index\n en_h, en_c = dec_h, dec_c\n \n #return the deocded sentence\n return decoded_sentence\n\n\ndef Abstractive(review):\n inp_review = clean(review,\"inputs\")\n inp_review = ' '.join(inp_review)\n inp_x= in_tokenizer.texts_to_sequences([inp_review]) \n inp_x= pad_sequences(inp_x, maxlen=max_in_len, padding='post')\n\n summary=decode_sequence(inp_x.reshape(1,max_in_len))\n if 'eos' in summary :\n summary=summary.replace('eos','')\n return summary\n\nclass Summary():\n\n def __init__(self, review_text) -> None:\n self.review = review_text\n\n def extractive(self):\n import spacy\n from spacy.lang.en.stop_words import STOP_WORDS\n stopwords=list(STOP_WORDS)\n from string import punctuation\n punctuation=punctuation+ '\\n'\n #run this in terminal \n #python -m spacy download en\n nlp = spacy.load('en_core_web_sm')\n doc= nlp(self.review)\n\n #calculate word frequencies\n word_frequencies={}\n for word in doc:\n if word.text.lower() not in stopwords:\n if word.text.lower() not in punctuation:\n if word.text not in word_frequencies.keys():\n word_frequencies[word.text] = 1\n else:\n word_frequencies[word.text] += 1\n\n #normalize frequencies\n max_frequency=max(word_frequencies.values())\n for word in word_frequencies.keys():\n word_frequencies[word]=word_frequencies[word]/max_frequency\n\n #sentence tokeniization\n sentence_tokens= [sent for sent in doc.sents]\n sentence_scores = {}\n for sent in sentence_tokens:\n for word in sent:\n if word.text.lower() in word_frequencies.keys():\n if sent not in sentence_scores.keys(): \n sentence_scores[sent]=word_frequencies[word.text.lower()]\n else:\n sentence_scores[sent]+=word_frequencies[word.text.lower()]\n print(type(sentence_scores))\n score = 0\n summary=None\n for key,value in 
sentence_scores.items():\n if value > score :\n score = value\n summary = (key)\n return summary \n\nfrom django.shortcuts import render\n# Create your views here.\ndef hey(request):\n if request.method == 'POST':\n review = (request.POST[\"review_text\"])\n s = Summary(review)\n extractive = s.extractive()\n abstractive = Abstractive(review)\n return render(request, 'AI_app/home.html', {'extractive': extractive, 'originaltext':review, 'abstractive': abstractive})\n else:\n return render(request, 'AI_app/home.html',{}) \n","repo_name":"clinton64/reviewsummarizer","sub_path":"AI_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29348069954","text":"import pytest\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session, sessionmaker\n\nfrom pythonapi.app import app, get_db\nfrom pythonapi.models import Base\n\nSQLALCHEMY_DATABASE_URL = \"sqlite:///./tests/test.db\"\n\nengine = create_engine(\n SQLALCHEMY_DATABASE_URL, connect_args={\"check_same_thread\": False}\n)\nTestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\nclient = TestClient(app)\n\n\ndef override_get_db() -> Session:\n try:\n db = TestingSessionLocal()\n yield db\n finally:\n db.close()\n\n\n@pytest.fixture()\ndef test_db() -> Base:\n Base.metadata.create_all(bind=engine)\n yield\n Base.metadata.drop_all(bind=engine)\n\n\napp.dependency_overrides[get_db] = override_get_db\n","repo_name":"MartySalade/PythonAPI","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6569878942","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import learning_curve, ShuffleSplit\nfrom sklearn.linear_model import LinearRegression\n\n\nclass ExploratoryAnalysis():\n \n def __init__(self, features_file):\n self.features_file = features_file\n \n def split_data(self, train_data_file, test_data_file):\n \n ''' function that splits our dataset into 80% for training and 20% for testing '''\n \n data = pd.read_csv(self.features_file, encoding='latin-1')\n\n features = np.array(data.iloc[:, 0:-1])\n labels = np.array(data[\"label\"])\n \n X_train, X_test, Y_train, Y_test = train_test_split(features, labels, test_size=0.20)\n \n train_df = pd.DataFrame(X_train)\n train_df[\"label\"] = Y_train\n train_df.to_csv(train_data_file, encoding=\"latin-1\") # save the training dataset\n \n test_df = pd.DataFrame(X_test)\n test_df[\"label\"] = Y_test\n test_df.to_csv(test_data_file, encoding=\"latin-1\") # save the testing dataset\n\n def perform_analysis(self, train_data_file):\n \n ''' function that performs exploratory analysis '''\n \n df_train = pd.read_csv(train_data_file, encoding='latin-1') # read training dataset\n \n# self.scatter_plots(df_train)\n self.fake_true_percentages(df_train, \"big_scale_event\")\n\n sources = ['ahram', 'alaraby', 'arabiya', 'asharqalawsat', 'dailysabah', 'etilaf', 'jordantimes', 'nna', 'trt',\n 'alalam', 'manar', 'sana', 'sputnik', 'tass', 'reuters']\n \n colors = ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'r', 'r', 'r', 'r', 'r', 'g']\n self.fake_true_percentages(df_train, 
\"source\", sources, colors)\n \n categories = ['against', 'pro' , 'neutral']\n self.fake_true_percentages(df_train, \"source_category\", categories)\n\n df_fake = df_train.loc[df_train[\"label\"] == 0] # articles labeled as fake\n df_true = df_train.loc[df_train[\"label\"] == 1] # articles labeled as true \n \n self.plot_label_stats(df_train)\n self.plot_feature_stats(df_fake, \"fake\")\n self.plot_feature_stats(df_true, \"true\")\n \n def plot_label_stats(self, df):\n \n labels = {\"true\": (np.count_nonzero(np.array(df[\"label\"]))) / len(df[\"label\"]) * 100,\n \"fake\": (len(df[\"label\"]) - np.count_nonzero(np.array(df[\"label\"]))) / len(df[\"label\"]) * 100}\n \n self.plot_figure(labels, \"label\", \"% of articles\", \"true_fake_dist\", colors=['r', 'g'])\n \n def fake_true_percentages(self, train_df, column, categories=None, colors=['b', 'r', 'g']):\n \n ''' function that calculates % true and % fake for a given column in the entire train dataset'''\n \n if categories == None:\n categories = np.unique(train_df[column]) # get unique values\n \n nb_total_per_category = {} # dictionary of category:nb articles\n nb_fake_per_category = {} # dictionary of category:nb fake articles\n nb_true_per_category = {} # dictionary of category:nb true articles\n \n for category in categories:\n if category != \"none\":\n nb_total_per_category[category] = 0\n nb_fake_per_category[category] = 0\n nb_true_per_category[category] = 0\n \n for _, row in train_df.iterrows():\n \n if row[column] != \"none\":\n nb_total_per_category[row[column]] += 1\n if row[\"label\"] == 0: # fake\n nb_fake_per_category[row[column]] += 1\n elif row[\"label\"] == 1: # true\n nb_true_per_category[row[column]] += 1 \n \n for category in categories:\n if category != \"none\":\n nb_fake_per_category[category] = (nb_fake_per_category[category] / nb_total_per_category[category]) * 100 \n nb_true_per_category[category] = (nb_true_per_category[category] / nb_total_per_category[category]) * 100\n \n self.plot_figure(nb_total_per_category, column, \"nb articles in our dataset from this \" + column, column + \"_total.png\", colors)\n self.plot_figure(nb_true_per_category, column, \"% of this \" + column + \"'s articles that was labeled true\", column + \"_true.jpg\", colors)\n self.plot_figure(nb_fake_per_category, column, \"% of this \" + column + \"'s articles that was labeled fake\", column + \"_fake.jpg\", colors)\n \n def plot_figure(self, array_to_plot, xlabel, ylabel, title, colors):\n \n plt.figure(figsize=(10, 12)) \n plt.bar(range(len(array_to_plot)), list(array_to_plot.values()), align='center', color=colors)\n plt.xticks(range(len(array_to_plot)), list(array_to_plot.keys()))\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n \n if xlabel == \"source_category\" or xlabel == \"source\": # color columns based on source's category\n against = mpatches.Patch(color='blue', label='against')\n pro = mpatches.Patch(color='red', label='pro')\n neutral = mpatches.Patch(color='green', label='neutral')\n \n plt.legend(handles=[against, pro, neutral])\n \n plt.savefig(title) \n \n plt.figure() \n \n def scatter_plots(self, df):\n \n ''' function that plots the scatter plots of each pair of features '''\n \n# df.loc[df['label'] == 0, ['label']] = \"fake\"\n# df.loc[df['label'] == 1, ['label']] = \"true\"\n \n corr = df.drop(columns=['unit_id']).corr()\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n \n # Set up the matplotlib figure\n f, ax = plt.subplots()\n 
\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n \n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n \n# plt.setp(ax.get_xticklabels(), ha=\"right\")\n# plt.setp(ax.get_yticklabels(), ha=\"right\")\n\n plt.savefig(\"scatter_plot.png\")\n \n# \n# # column_pairs = list(product(df.columns, df.columns))\n# column_pairs = [('sectarian_language', 'quoted_sources'),\n# ('sectarian_language', 'bias'),\n# ('sectarian_language', 'factive_verbs'),\n# ('sectarian_language', 'implicative_verbs'),\n# ('sectarian_language', 'hedges'),\n# ('sectarian_language', 'report_verbs'),\n# ('sectarian_language', 'assertive_verbs'),\n# ('sectarian_language', 'consistency_score'),\n# ('quoted_sources', 'sectarian_language'),\n# ('quoted_sources', 'bias'),\n# ('quoted_sources', 'factive_verbs'),\n# ('quoted_sources', 'implicative_verbs'),\n# ('quoted_sources', 'hedges'), ('quoted_sources', 'report_verbs'),\n# ('quoted_sources', 'assertive_verbs'), ('quoted_sources', 'consistency_score'),\n# ('bias', 'sectarian_language'), ('bias', 'quoted_sources'), ('bias', 'factive_verbs'), ('bias', 'implicative_verbs'), ('bias', 'hedges'), ('bias', 'report_verbs'), ('bias', 'assertive_verbs'), ('bias', 'consistency_score'),\n# ('factive_verbs', 'sectarian_language'), ('factive_verbs', 'quoted_sources'), ('factive_verbs', 'bias'), ('factive_verbs', 'implicative_verbs'), ('factive_verbs', 'hedges'), ('factive_verbs', 'report_verbs'), ('factive_verbs', 'assertive_verbs'), ('factive_verbs', 'consistency_score'),\n# ('implicative_verbs', 'sectarian_language'), ('implicative_verbs', 'quoted_sources'), ('implicative_verbs', 'bias'), ('implicative_verbs', 'factive_verbs'), ('implicative_verbs', 'hedges'), ('implicative_verbs', 'report_verbs'), ('implicative_verbs', 'assertive_verbs'), ('implicative_verbs', 'consistency_score'),\n# ('hedges', 'sectarian_language'), ('hedges', 'quoted_sources'), ('hedges', 'bias'), ('hedges', 'factive_verbs'), ('hedges', 'implicative_verbs'), ('hedges', 'report_verbs'), ('hedges', 'assertive_verbs'), ('hedges', 'consistency_score'),\n# ('report_verbs', 'sectarian_language'), ('report_verbs', 'quoted_sources'), ('report_verbs', 'bias'), ('report_verbs', 'factive_verbs'), ('report_verbs', 'implicative_verbs'), ('report_verbs', 'hedges'), ('report_verbs', 'assertive_verbs'), ('report_verbs', 'consistency_score'),\n# ('assertive_verbs', 'sectarian_language'), ('assertive_verbs', 'quoted_sources'), ('assertive_verbs', 'bias'), ('assertive_verbs', 'factive_verbs'), ('assertive_verbs', 'implicative_verbs'), ('assertive_verbs', 'hedges'), ('assertive_verbs', 'report_verbs'), ('assertive_verbs', 'consistency_score'),\n# ('consistency_score', 'sectarian_language'), ('consistency_score', 'quoted_sources'), ('consistency_score', 'bias'), ('consistency_score', 'factive_verbs'), ('consistency_score', 'implicative_verbs'), ('consistency_score', 'hedges'), ('consistency_score', 'report_verbs'), ('consistency_score', 'assertive_verbs')\n# ]\n# \n# print(column_pairs)\n# for pair in column_pairs:\n# sns.pairplot(df[[pair[0], pair[1], \"label\"]], hue=\"label\")\n# plt.savefig(\"new_scatter_plots/%s_%s.png\" % (pair[0], pair[1]))\n\n def plot_feature_stats(self, df, title):\n \n ''' function that performs exploratory analysis on the features in our dataset '''\n \n file = open(\"stats_for_\" + title, 'w+')\n \n file.write(\"stats for \" + title)\n 
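# note: file.write adds no newline, so successive entries run together in the stats file\n        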
file.write(\"total number of articles: %d\" % len(df))\n \n ''' numerical features '''\n \n for col in [\"quoted_sources\"]:\n file.write(\"stats for \" + col)\n file.write(str(df[col].describe()))\n plt.figure(figsize=(9, 8))\n \n cut1 = 0\n cut2 = 0\n cut3 = 0\n cut_off = 0.05\n for val in df[col]:\n if val == 0:\n cut1 += 1\n elif val == 0.5:\n cut2 += 1\n else:\n cut3 += 1\n \n cut1 /= len(df[col])\n cut2 /= len(df[col])\n \n cut1 *= 100\n cut2 *= 100\n print(cut1, cut2)\n \n min_val = df[col].min()\n max_val = df[col].max()\n \n str1 = \"%.2f to %.2f\" % (min_val, cut_off)\n str2 = \"%.2f to %.2f\" % (cut_off, max_val)\n \n plt.bar([\"0\", \"0.5\", \"1\"], [cut1, cut2, cut3], color='g')\n \n plt.xlabel(col)\n plt.ylabel('% ' + title + ' articles')\n \n plt.ylim(0, 100)\n plt.grid(None)\n plt.savefig(\"output/exploratory_analysis_plots/\" + col + \"_\" + title + \".png\")\n \n file.close()\n \n def plot_learning_curve(self, training_data_file, learning_curve_file):\n \n '''read data from file'''\n \n data = pd.read_csv(training_data_file, encoding='latin-1') \n \n Y = np.array(data[\"label\"]) \n X = np.array(data.iloc[:, 6:10]) \n\n '''fit a linear regression classifier to the training data'''\n \n regr = LinearRegression()\n regr = regr.fit(X, Y)\n \n '''plot the learning curve'''\n \n # set the cross-validation factor to be used in evaluating learning curve\n cv = ShuffleSplit(n_splits=len(Y), test_size=0.2) # 20% for validation\n \n train_sizes, train_scores, test_scores = learning_curve(regr, X, Y, cv=cv) # calculate learning curve values\n \n # mean of the results of the training and testing\n train_scores_mean = np.mean(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n \n '''plot results'''\n \n plt.figure()\n plt.xlabel(\"Number of Training Points\")\n plt.ylabel(\"Error\")\n \n plt.plot(train_sizes, train_scores_mean, color=\"r\", label=\"Ein\")\n plt.plot(train_sizes, test_scores_mean, color=\"g\", label=\"Eval\")\n \n frame = plt.gca()\n# frame.axes.yaxis.set_ticklabels([])\n frame.invert_yaxis()\n \n plt.legend(loc=\"best\")\n plt.savefig(learning_curve_file, bbox_inches='tight')\n","repo_name":"fakenewssyria/fake_news_detection","sub_path":"exploratory_analysis/exploratory_analysis.py","file_name":"exploratory_analysis.py","file_ext":"py","file_size_in_byte":13052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42786830962","text":"# Q) WAP to find Prime Numbers?\n\nno = int(input(\"Enter NO: \"))\n\ni = 2\nf = 0\n\nwhile i < no:\n if no % i == 0:\n f = 1\n break\n\n i += 1\n\nif f == 1:\n print(\"It is not a Prime Number\")\nelse:\n print(\"It is a Prime Number\")","repo_name":"Vyomesh0108/Python20","sub_path":"Prime_Numbers.py","file_name":"Prime_Numbers.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20485396891","text":"import numpy as np\r\n\r\nclass Binomial_Classifier():\r\n ''' Description\r\n This is a supervised learning algorithm, Binomial Naive Bayes.\r\n In fit function:\r\n The model learns the knowledge(the FREQUENCIES OF 1 and 0)\r\n from the dataset, and uses the knowledge to predict new data.\r\n EACH data sample contains:\r\n an input and a label, where the input is\r\n a set of features(1s or 0s), such as [x1,x2,...xi...],\r\n let's say there are 1000 features for each sample.\r\n For EACH category:\r\n the model learns A and Prior\r\n where A is (the frequencies 
of 1)/(the frequencies of 0).\r\n Then A = (A+epsilon)/sum(A+epsilon), where the numerator\r\n is a vector with size 1000, and denominator is a number.\r\n A has the same size as a sample, 1000.\r\n The Prior = (# of samples in this category)/(entire dataset)\r\n Thus, K categories, K As and K Priors.\r\n The model uses As and Priors to predict new samples.\r\n In predict funcion:\r\n posterior = p(1-p)prior\r\n Let's say we have 800 new samples.\r\n For EACH category:\r\n the model generates 800 probabilities/scores.\r\n Thus, we get 800 by K probabilities, where K is\r\n the number of the categories.\r\n For each row:\r\n we find the index of the max probability,\r\n so the category we want to assign to the sample\r\n is that index.\r\n '''\r\n def fit(self, X, y, epsilon=1):\r\n self.priors = dict()\r\n self.likelihoods = dict()\r\n self.K = set(y.astype(int))\r\n for k in self.K:\r\n X_k = X[y == k]\r\n self.priors[k] = len(X_k)/len(X)\r\n self.likelihoods[k] = (np.count_nonzero(X_k, 0)+epsilon)/sum(np.count_nonzero(X_k, 0)+epsilon)\r\n\r\n def predict(self, X):\r\n N, D = X.shape\r\n P_hat = np.zeros((N, len(self.K)))\r\n for k, l in self.likelihoods.items():\r\n P_hat[:,k] = np.sum(X*np.log(l),1)+np.sum((1-X)*np.log(1-l),1)+np.log(self.priors[k])\r\n return np.argmax(P_hat, 1).astype(int)\r\n\r\n def get_confusion(self, X, y):\r\n n = len(self.K)\r\n conf_matrix = np.zeros((n,n))\r\n for i in range(n):\r\n y_hat = self.predict(X[y==i])\r\n for j in range(n):\r\n conf_matrix[i,j] = sum(y_hat==j)\r\n conf_matrix[i] = conf_matrix[i]/sum(conf_matrix[i])\r\n return np.round(conf_matrix, 2)\r\n","repo_name":"InZdark/classification_algorithms","sub_path":"classification_algorithms/binomial_classifier.py","file_name":"binomial_classifier.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71235368693","text":"import logging\nimport time\nimport json\nimport boto3\nfrom slack_bolt import App\nfrom slack_bolt.adapter.aws_lambda import SlackRequestHandler\n\n# set up input keys\nmandatory_keys = {\"symbol\", \"date_from\", \"algos\"}\noptional_keys = {\n \"date_to\",\n \"resolution\",\n \"search_period\",\n \"notify_method\",\n \"notify_recipient\",\n \"target_ta_confidence\",\n}\n\n\n# used to raise an exception when we can't find the ta-automation step function\nclass StepFunctionNotFoundException(Exception):\n ...\n\n\n# process_before_response must be True when running on FaaS\napp = App(\n process_before_response=True,\n # token=bot_token,\n # signing_secret=signing_token,\n)\n\n\n@app.middleware # or app.use(log_request)\ndef log_request(logger, body, next):\n logger.debug(body)\n return next()\n\n\ndef respond_to_slack_within_3_seconds(body, ack):\n # i don't understand this block of boilerplate at all, given that the execution proceeds regardless of usage\n # if body.get(\"text\") is None:\n # ack(f\":x: Usage: {command} (description here)\")\n # else:\n # title = body[\"text\"]\n\n # for whatever reason I can't find a way of killing the call here, so I'll have to let it through to the state machine and let it fail\n ack(f\"Just a sec while I check your inputs\")\n\n\ndef get_step_function(client):\n # find the TA-analysis step function via tagging\n # client is an instance of boto3 stepfunctions client\n machines = client.list_state_machines()\n\n # machines is a list of all of the state machines, so need to iterate through looking for the right one\n for machine in 
machines[\"stateMachines\"]:\n # grab the tags for this state machine\n tags = client.list_tags_for_resource(resourceArn=machine[\"stateMachineArn\"])\n\n # iterate through all tags to the find the one we want. i could use if \"tag\" in tags.keys() but whatever\n for tag in tags[\"tags\"]:\n if tag[\"key\"] == \"aws:cloudformation:stack-name\":\n if tag[\"value\"] == \"ta-automation\":\n return machine\n\n # if we got here, we failed\n raise StepFunctionNotFoundException(\n \"Unable to find step function with tag aws:cloudformation:stack-name=ta-automation\"\n )\n\n\ndef do_ta_usage():\n usage = \"Mandatory parameters: symbol date_from and algos\\n\"\n usage += \"Optional parameters: date_to resolution search_period notify_method and notify_recipient\\n\"\n usage += \"Examples:\\n\"\n usage += \"/ta symbol=abc date_from=2022-01-01T04:16:13+10:00 algos=awesome-oscillator,stoch target_ta_confidence=7\\n\"\n usage += \"/ta symbol=abc date_from=2022-01-01T04:16:13+10:00 date_to=2022-01-05T12:12:12+10:00 algos=accumulation-distribution target_ta_confidence=7 resolution=5d search_period=15\\n\"\n return usage\n\n\ndef da_ta_validate(text):\n parameters = text.split()\n\n valid_parameters = {}\n found_keys = set()\n errors = \"\"\n error_found = False\n\n valid_keys = mandatory_keys.union(optional_keys)\n\n for parameter in parameters:\n split_parameter = parameter.split(\"=\")\n if len(split_parameter) != 2:\n errors += f\"Input parameter set without value assignment: {split_parameter[0]}=what?\\n\"\n error_found = True\n else:\n valid_parameters[split_parameter[0]] = split_parameter[1]\n found_keys.add(split_parameter[0])\n\n # see if there's an invalid parameter specified\n if len(found_keys.difference(valid_keys)) > 0:\n errors += f\"Invalid key specified: {str(found_keys.difference(valid_keys))}\\n\"\n error_found = True\n\n # if a mandatory key was omitted\n if len(mandatory_keys.difference(found_keys)) > 0:\n errors += (\n f\"Mandatory key missing: {str(mandatory_keys.difference(found_keys))}\\n\"\n )\n error_found = True\n\n return error_found, errors, valid_parameters\n\n\n# execute the slash command\ndef do_ta(respond, body):\n # validate the inputs\n if body.get(\"text\") is None:\n # no parameters were given\n respond(do_ta_usage())\n else:\n # validate the input\n errors_found, error_messages, valid_parameters = da_ta_validate(body[\"text\"])\n\n if errors_found:\n # input is bad - missing or unknown parameters\n respond(error_messages)\n respond(do_ta_usage())\n\n else:\n # execute the step function and return the output\n\n # first we need to format the step machine request\n # expand algos\n algo_list = []\n # first check if more than one was specified\n if \",\" in valid_parameters[\"algos\"]:\n input_algos = valid_parameters[\"algos\"].split(\",\")\n for this_algo in input_algos:\n algo_list.append({this_algo: None})\n else:\n # only one was specified\n algo_list.append({valid_parameters[\"algos\"]: None})\n\n # job structure\n # mandatories first\n ta_job = {\n \"jobs\": [\n {\n \"symbol\": valid_parameters[\"symbol\"],\n \"date_from\": valid_parameters[\"date_from\"],\n \"ta_algos\": algo_list,\n }\n ]\n }\n\n for optional in optional_keys:\n if valid_parameters.get(optional) != None:\n ta_job[\"jobs\"][0][str(optional)] = valid_parameters[str(optional)]\n\n respond(\n f\"Okay your inputs are all good, now sending it for processing. 
This might take 15 seconds or more so be patient please\"\n )\n # find the step machine so we can get its arn\n ta_automation_machine = get_step_function(client)\n\n # call the state machine\n state_machine_invocation = client.start_execution(\n stateMachineArn=ta_automation_machine[\"stateMachineArn\"],\n name=body[\"trigger_id\"],\n input=json.dumps(ta_job),\n )\n\n # loop til we get the state machine response back\n while True:\n time.sleep(5)\n\n # check if the state machine is still running\n job_execution = client.describe_execution(\n executionArn=state_machine_invocation[\"executionArn\"]\n )\n\n # if its done, then we can bust out\n if job_execution[\"status\"] == \"SUCCEEDED\":\n break\n\n # grab the output and load it as json\n state_machine_output = json.loads(job_execution[\"output\"])\n\n # start responding\n response_message = (\n f'Finished analysis for {state_machine_output[\"job\"][\"symbol\"]}:\\n'\n )\n\n for analysis in state_machine_output[\"ta_analyses\"]:\n response_message += f' - {str(list(analysis[\"ta_algo\"].keys())[0])} {analysis[\"ta_analysis\"][\"confidence\"]}/10 confidence <{analysis[\"graph_url\"]}|Graph link>\\n'\n\n respond(response_message)\n\n\n# used by all functions so its a global\nclient = boto3.client(\"stepfunctions\")\n\n# register commands and handlers\ncommand = \"/ta\"\napp.command(command)(ack=respond_to_slack_within_3_seconds, lazy=[do_ta])\n\nSlackRequestHandler.clear_all_log_handlers()\nlogging.basicConfig(format=\"%(asctime)s %(message)s\", level=logging.INFO)\n\n\ndef lambda_handler(event, context):\n slack_handler = SlackRequestHandler(app=app)\n return slack_handler.handle(event, context)\n","repo_name":"chris-t-fernando/ta-notification","sub_path":"ta-automation/slackbot/slackbot.py","file_name":"slackbot.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2046057439","text":"#!/usr/bin/env python3\n\"\"\" 2021/20: Trench Map \"\"\"\n\nimport sys\n\nrule, image = sys.stdin.read().strip().split(\"\\n\\n\")\n\nassert len(rule) == 512\nassert rule[0] == \"#\"\nassert rule[-1] == \".\"\n\np = set(\n (x, y)\n for y, ln in enumerate(image.split())\n for x, c in enumerate(ln)\n if c == \"#\"\n)\n\nfor r in range(1, 50 + 1):\n pp = set()\n\n # In odd-numbered steps, we only track the cells that turn off because an\n # infinite number turn on. 
In even-numbered steps, we only track the cells\n    # that turn on because an infinite number turn off.\n    t = \".\" if r % 2 else \"#\"\n\n    min_x, *_, max_x = sorted(x for (x, y) in p)\n    min_y, *_, max_y = sorted(y for (x, y) in p)\n    for y in range(min_y - 1, max_y + 2):\n        for x in range(min_x - 1, max_x + 2):\n            n = sum(\n                x*pow(2, i)\n                for i, x in enumerate(\n                    (x + dx, y + dy) in p if r % 2 else (x + dx, y + dy) not in p\n                    for dy in [1, 0, -1]\n                    for dx in [1, 0, -1]\n                )\n            )\n\n            if rule[n] == t:\n                pp.add((x, y))\n\n    p = pp\n\n    if r in { 2, 50 }:\n        print(\"Part {}:\".format({2: \"1\", 50: \"2\"}[r]),\n            len(p)\n        )\n","repo_name":"hughcoleman/advent-of-code","sub_path":"2021/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"33354690368","text":"from threading import Thread\r\n\r\nfrom GUI.DashComponents.DashManagerComponent import run_dash\r\nfrom GUI.WindowComponents.GenericWindow import GenericWindow\r\nfrom GUI.WindowComponents.HomePageWindow import HomePageWindow\r\nfrom GUI.WindowComponents.ManualClassifierWindow import ManualClassifierWindow\r\nfrom GUI.WindowComponents.DashboardWindow import OverviewDashboardWindow\r\nfrom GUI.WindowComponents.WaitingWindow import WaitingWindow\r\n\r\n\r\nclass GUIManager:\r\n    def __init__(self, process_manager):\r\n        self.process = None\r\n        self.window = None\r\n        self.process_manager = process_manager\r\n        self.go_to_home_page()\r\n\r\n        self.page = 0\r\n        self.params = None\r\n\r\n        self.message = ''\r\n\r\n    def go_to_home_page(self):\r\n        if self.window:\r\n            self.window.close()\r\n        self.window = HomePageWindow(self)\r\n        self.window.show()\r\n\r\n    def go_to_manual_classifier_page(self):\r\n        if self.window:\r\n            self.window.close()\r\n        self.window = ManualClassifierWindow(self)\r\n        self.window.show()\r\n\r\n    def go_to_waiting_page(self, message, go_back_button_text, number_labels):\r\n        if self.window:\r\n            self.window.close()\r\n        self.window = WaitingWindow(self, message, go_back_button_text, number_labels)\r\n        self.window.show()\r\n\r\n    def go_to_generic_page(self, message):\r\n        if self.window:\r\n            self.window.close()\r\n        self.window = GenericWindow(self, message)\r\n        self.message = ''\r\n        self.window.show()\r\n\r\n    def go_to_dashboard(self):\r\n        if self.window:\r\n            self.window.close()\r\n\r\n        self.process = Thread(target=run_dash)\r\n        self.process.start()\r\n\r\n        self.window = OverviewDashboardWindow(self, self.process)\r\n        self.window.show()\r\n","repo_name":"riccardogibello/e_health-project","sub_path":"code/GUI/GUIManager.py","file_name":"GUIManager.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42930191905","text":"def eh_primo(num):\r\n    contador= 2\r\n    if num==1 or num==0:\r\n        return False\r\n    while contador 2:\n    mes = 'f({:.2f}) = {:.2f}'.format(x, sqrt(x - 2))\nelse:\n    mes = '{:.2f} {}'.format(x, '∉ dom(f)')\n\n#output\nprint(mes)\n\n","repo_name":"baptiste2002/Informatica5","sub_path":"Toetsen/Irrationale functie.py","file_name":"Irrationale functie.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23166549306","text":"import math\r\n\r\n\r\nclass DANU_VectorCompare:\r\n\r\n    def __init__(self, query_structure, document_structure):\r\n        \r\n        self.queries = query_structure\r\n        self.documents = document_structure\r\n        
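# basic corpus bookkeeping; note that no_of_documents and k are not used by the methods below\r\n        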
self.no_of_documents = len(self.documents)\r\n        #self.average_length_of_all_documents = self.average_document_length()\r\n        self.k = 1.2\r\n\r\n\r\n    # concordance is the word count in a document\r\n    '''\r\n    def concordance(self):\r\n        for key, value in self.documents.items():\r\n        \tdocument = value[1]\r\n\r\n\t    if type(document) != str:\t    \r\n\t        raise ValueError('Supplied Argument should be of type string')\r\n\t    con = {}\r\n\t    for word in document.split(' '):\r\n\t        if word in con:\r\n\t        #if con.has_key(word):\r\n\t            con[word] = con[word] + 1\r\n\t        else:\r\n\t        \t con[word] = 1\r\n\t    return con\t\r\n    '''\r\n    \r\n    def magnitude(self, concordance):\r\n        #concordance = concordance(self)\r\n        if type(concordance) != dict:\r\n            raise ValueError('Supplied Argument should be of type dict')\r\n        total = 0\r\n        for word,count in concordance.items():\r\n            total += count ** 2\r\n        return math.sqrt(total)\t\r\n\r\n\r\n    def relation(self,concordance1, concordance2):\r\n        if type(concordance1) != dict:\r\n            raise ValueError('Supplied Argument 1 should be of type dict')\r\n        if type(concordance2) != dict:\r\n            raise ValueError('Supplied Argument 2 should be of type dict')\r\n        relevance = 0\r\n        topvalue = 0\r\n        for word, count in concordance1.items():\r\n            if word in concordance2:\r\n            #if concordance2.has_key(word):\r\n                topvalue += count * concordance2[word]\r\n        if (self.magnitude(concordance1) * self.magnitude(concordance2)) != 0:\r\n            return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2))\r\n        else:\r\n            return 0\r\n\r\n    \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"DanuShandra/Complex-Answer-Retrieval-System","sub_path":"danu_vector_compare.py","file_name":"danu_vector_compare.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11012050797","text":"import sys\ninput = sys.stdin.readline\n\n# grid size N / number of sum queries M \nN, M = map(int, input().split())\n\n# input data\nmat = [ list(map(int, input().split())) for _ in range(N)]\nmat_sum = [[0] * (N+1) for _ in range(N+1)]\n\n# Do > build the prefix sums from (1, 1) up to every cell \n  # filling starts at (1,1) with the value of mat[0][0]\n  # so no separate initialization of mat_sum is needed\nfor i in range(1, N+1):\n    for j in range(1, N+1):\n        mat_sum[i][j] = mat_sum[i-1][j] + mat_sum[i][j-1] - mat_sum[i-1][j-1] + mat[i-1][j-1]\n\n# # M queries\nfor _ in range(M):\n    x1, y1, x2, y2 = map(int, input().split())\n    res = mat_sum[x2][y2] - mat_sum[x1-1][y2] - mat_sum[x2][y1-1] + mat_sum[x1-1][y1-1]\n    print(res)\n\n","repo_name":"MilanoBeer/Algorithm_Sol","sub_path":"Prefx-Sum/BJ_11660_구간합구하기5.py","file_name":"BJ_11660_구간합구하기5.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23275203791","text":"\"\"\"Various things copied from my personal utils repo\"\"\"\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\ndef compose(*funcs):\n    def composed_func(*args, **kwargs):\n        result = None\n        for func in reversed(funcs):\n            result = func(*args, **kwargs) if result is None else func(result)\n        return result\n    return composed_func\n\ndef scaled_global_norm(tree):\n    size, norm = jax.tree_util.tree_reduce(lambda carry, leaf: (carry[0] + leaf.size, carry[1] + jnp.sum(leaf ** 2)), tree, jnp.zeros(2))\n    return (norm / size) ** 0.5\n\ndef interpolate_nearest(arr, scale_factor):\n    # only supports CWH\n    if len(arr.shape) != 3:\n        raise ValueError(f'Expected 3 dimensional tensor, got shape: {arr.shape}')\n\n    
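# build a sampling grid, then round each fractional coordinate to the nearest input index\n    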
channels, in_height, in_width = arr.shape\n\n    height = int(in_height * scale_factor)\n    width = int(in_width * scale_factor)\n\n    read_channels = jnp.arange(channels)[:, None, None]\n    read_rows = jnp.linspace(0, in_height - 1, height)[None, :, None]\n    read_cols = jnp.linspace(0, in_width - 1, width)[None, None, :]\n\n    read_rows = jnp.round(read_rows).astype(jnp.int32)\n    read_cols = jnp.round(read_cols).astype(jnp.int32)\n\n    return arr[\n        jnp.tile(read_channels, (1, height, width)),\n        jnp.tile(read_rows, (channels, 1, width)),\n        jnp.tile(read_cols, (channels, height, 1)),\n    ]\n\ndef maybe_hk_dropout(rate, value):\n    key = hk.maybe_next_rng_key()\n    if key is not None:\n        value = hk.dropout(key, rate, value)\n    return value\n\ndef swish(x):\n    return x * jax.nn.sigmoid(x)\n\ndef torch_init_conv(n_in, kernel_size, n_spatial=2):\n    stddev = 1 / np.sqrt(n_in * (kernel_size ** n_spatial))\n    return hk.initializers.RandomUniform(-stddev, stddev)\n\ndef make_conv(*args, in_channels, **kwargs):\n    kwargs = {\n        'kernel_shape': 3,\n        'stride': 1,\n        'padding': (1, 1),\n        'data_format':'NCHW',\n        'with_bias': True,\n        **kwargs\n    }\n\n    init_fn = torch_init_conv(in_channels, kwargs['kernel_shape'])\n    kwargs['w_init'] = init_fn\n    if kwargs['with_bias']:\n        kwargs['b_init'] = init_fn\n\n    return hk.Conv2D(*args, **kwargs)\n\nclass PmeanBatchNormWithoutState(hk.Module):\n    def __init__(self, axis_name='batch', name='bn'):\n        super().__init__(name=name)\n        self.axis_name = axis_name\n\n    def __call__(self, x):\n        offset = hk.get_parameter('bn_offset', shape=(x.shape[0],), init=hk.initializers.Constant(0.))\n        scale = hk.get_parameter('bn_scale', shape=(x.shape[0],), init=hk.initializers.Constant(1.))\n        if hk.running_init():\n            mean = jnp.zeros(x.shape[0])\n            variance = jnp.zeros(x.shape[0])\n        else:\n            mean = jnp.mean(jax.lax.pmean(x, self.axis_name), axis=[1, 2])\n            second_moment = jnp.mean(jax.lax.pmean(x ** 2, self.axis_name), axis=[1,2])\n\n            variance = second_moment - mean ** 2\n        mean = mean[:, None, None]\n        variance = variance[:, None, None]\n\n        normalized = ((x - mean) / jnp.sqrt(variance + 1e-5)) * scale[:, None, None] + offset[:, None, None]\n        return normalized\n\ndef torch_conv_kernel_to_hk(kernel):\n    return jnp.transpose(kernel, [2, 3, 1, 0])\n","repo_name":"davisyoshida/vqgan-haiku","sub_path":"vqgan/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33853397453","text":"import pygame, time, math, random, sys\nfrom pygame.locals import *\npygame.init()\nx = 1250\ny = 650\nscreen = pygame.display.set_mode((x , y)) # set the window size\nbackground = pygame.Surface((x , y)) # set the canvas size\nbackground.fill(( 0 , 0 , 120 )) # fill colour (adjustable later)\nFPS = 60\nclock = pygame.time.Clock()\nADD_FIRE_RATE = 100\nabc = 10\n\nclass Superdonut(pygame.sprite.Sprite):\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        super().__init__()\n        self.raw_image = pygame.image.load('superdonut.png').convert_alpha()\n        self.image = pygame.transform.scale(self.raw_image, (150, 150)) # resize the player sprite\n        self.rect = self.image.get_rect()\n        self.rect.center = ( 50 , 500 )\n        self.rect.width , self.rect.height = ( 100 , 100 )\n        self.isjump = False\n        self.jumpspeed = 18 # initial jump speed, adjustable later\n    \n    def donut(self): # donut draws the player image\n        screen.blit(self.image , self.rect.center ) # show the player\n\n    def move(self):\n        speed = 10 # movement speed, adjustable later\n        if pressed_keys[K_RIGHT] and self.rect.centerx < x:\n            self.rect.centerx += speed\n        if pressed_keys[K_LEFT] and self.rect.centerx > 
0:\n            self.rect.centerx -= speed\n    \n    def jump(self):\n        if self.isjump: # perform the jump\n            if self.jumpspeed >= -18:\n                if self.jumpspeed > 0:\n                    self.rect.centery -= self.jumpspeed** 2 * 0.1 * 1\n                elif self.jumpspeed < 0:\n                    self.rect.centery += self.jumpspeed** 2 * 0.1 * 1\n                self.jumpspeed -= 1\n            else:\n                self.isjump = False\n                self.jumpspeed = 18\n    \nsuperdonut = Superdonut()\n\nclass Fireball(pygame.sprite.Sprite):\n    vel = 10\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        super().__init__()\n        self.fireball = pygame.image.load(\"fireball.png\")\n        self.rect = self.fireball.get_rect()\n        self.rect.center = (x,random.randint(50,600))\n        self.rect.width , self.rect.height = ( 30 , 30 )\n        #self.rect = pygame.Rect(self.fireball_rect.left, self.fireball_rect.top, 50, 50)\n    def update(self):\n        screen.blit(self.fireball, self.rect)\n\n        if self.rect.left > 0:\n            self.rect.left -= self.vel\n        else:\n            self.rect.center = (x,random.randint(50,600))\n\nfireball = Fireball()\nfire_list = []\nadd_fire_rate = 0 \n\nclass Enemy(pygame.sprite.Sprite):\n    dropspeed = 10\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n        super().__init__()\n        self.enemy = pygame.image.load(\"apple.png\")\n        self.rect = self.enemy.get_rect()\n        self.rect.center = (random.randint(50, 1050),25)\n        self.rect.width , self.rect.height = ( 50 , 50 )\n\n    def drop(self):\n        screen.blit(self.enemy , self.rect)\n        if self.rect.top < y:\n            self.rect.centery += self.dropspeed\n        else:\n            self.rect.right = random.randint(50, 1050)\n            self.rect.centery = 0\n\nenemy = Enemy()\n\nclass Blood:\n    def __init__(self):\n        self.raw_image = pygame.image.load(\"blood.png\")\n        self.image = pygame.transform.scale(self.raw_image, (70, 70)) \n        self.now_blood = 5\n        self.b_x = 0\n    def show(self):\n        all_blood = [self.image]*self.now_blood\n        position = [ 50 , 100 , 150 , 200 , 250 ]\n        for i in range (len(all_blood)):\n            screen.blit( self.image , (position[i],50) )\n    def hurt(self):\n        self.now_blood -= 1\nblood = Blood()\n\nwhile True: # game loop\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit()\n        elif event.type == pygame.KEYDOWN:\n            if event.key == (pygame.K_SPACE):\n                superdonut.isjump = True\n    clock.tick(FPS)\n    pressed_keys = pygame.key.get_pressed()\n    screen.blit(background, (0,0))\n    blood.show()\n    superdonut.donut()\n    superdonut.move()\n    superdonut.jump()\n    add_fire_rate += 1\n    if add_fire_rate == ADD_FIRE_RATE:\n        add_fire_rate = 0\n        new_flame = Fireball()\n        fire_list.append(new_flame)\n    for f in fire_list:\n        if f.rect.left < 0:\n            fire_list.remove(f)\n        f.update()\n    enemy.drop()\n    for f in fire_list:\n        if f.rect.colliderect(superdonut.rect):\n            blood.hurt() \n            fire_list.remove(f) \n    if pygame.sprite.collide_rect( superdonut , enemy ):\n        blood.hurt()\n        enemy = Enemy()\n        # TODO: add the game-over mechanism or extra health deduction here\n    pygame.display.update()\n    pygame.display.flip()","repo_name":"bkwmt/PBC109-1RunningGame","sub_path":"final project-collidetest.py","file_name":"final project-collidetest.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19463183356","text":"from mocket import mocketize\nimport os\nimport socket\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nSERVER_DIR = os.path.join(os.path.dirname(BASE_DIR), 'src/server')\n\n\n@mocketize\ndef test_unknown():\n    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    conn.connect(('localhost', 65432))\n\n    # Send some data to the server\n    
conn.sendall('sample command'.encode())\n\n    data = conn.recv(1024).decode()\n\n    assert data == 'Unknown command'\n\n    # Close the connection\n    conn.close()\n","repo_name":"storyofhis/tugas-1-client-server-socket","sub_path":"tests/servertest1.py","file_name":"servertest1.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12464653308","text":"N = int(input())\nfor _ in range(N):\n    x=input().rstrip()\n    stack=[]\n    total = 0\n    for i in x:\n        if i ==\"O\":\n            stack.append(i)\n        else:\n            for j in range(1,len(stack)+1):\n                total+=j\n            stack = []\n    if len(stack) !=0:\n        for j in range(1,len(stack)+1):\n            total+=j\n\n    print(total)","repo_name":"BeomgiSo/Algorithm_Data","sub_path":"Algorithm_Datastructure/최종정리/class2_backjun/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25513485622","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 11:31:19 2019\n\n@author: Nishant Das \n\nLanguage Models calculate the probability of a sequence of words. \nWith this we can model sentences and determine if a sentence is correct or incorrect given the probability of sequences of words in that given sentence.\nIf the probability of the sequence of words in a sentence follows a distribution typical of the language (English in this case), we can classify a sentence as being correct or incorrect, or real or not real.\n\nFor this project, I have created an algorithm to determine if a sentence given by a user is correct or incorrect. \n\nWords typically are used in association with other words. \nThis is typical when languages have a structure (Noun followed by a verb, etc.). \nHence, by looking at word(t) and word(t+1) occurrences (adjacent word occurrences), we can see how likely a sentence is to be real or not. \n\nA bigram is a sequence of two consecutive words in a sentence. \n\nIn this project, I use the Brown Corpus (explained in the code development section) of words to train a Hidden Markov Model, scored with a variation of the Viterbi algorithm.\nThe Viterbi Algorithm is a Dynamic Programming algorithm to find the most likely sequence of hidden states, known as the Viterbi path. \nGiven the model, we can then pass English sentences into the model to predict if the sentence is an actual English sentence or not. \n\nThe Brown Corpus contains 500 samples of English-language text, totaling roughly one million words, compiled from works published in the United States in 1961.\n\n\n\n\n\"\"\"\n# Importing relevant libraries\nfrom __future__ import print_function, division\nfrom future.utils import iteritems\nfrom builtins import range, input\nimport numpy as np\n# Importing nltk and brown corpus\nimport nltk\n# Below line is a one time step - disabled after downloading\nnltk.download('brown')\nfrom nltk.corpus import brown\n\n\ndef get_sentences():\n    # returns the 57340 sentences of the Brown corpus, taken from various sources. \n    # each sentence is represented as a list of individual word strings\n    return brown.sents()\n\ndef get_sentences_with_wordVecIds():\n    sentences = get_sentences()\n    indexed_sentences = []\n    i = 2\n    word2idx = {'START': 0, 'END': 1} #Made up words to signify start and end of a sentence. 
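\n    # walk the corpus, giving each unseen word the next free integer id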
\n    for sentence in sentences:\n        indexed_sentence = []\n        for word in sentence:\n            word = word.lower()\n            if word not in word2idx:\n                word2idx[word] = i\n                i += 1\n            indexed_sentence.append(word2idx[word])\n        indexed_sentences.append(indexed_sentence)\n    print(\"Vocab size:\", i)\n    return indexed_sentences, word2idx\n\nKEEP_WORDS = set([\n  # Any words we may need to include in our Word Vector. \n])\n \ndef get_sentences_with_wordVecIds_limit_vocab(n_vocab=3000, keep_words=KEEP_WORDS):\n    sentences = get_sentences()\n    indexed_sentences = []\n\n    i = 2\n\n# Initializing Relevant Dictionaries\n# Word vector in Id Number format and Word Format.\n# First and Second words of the Word vector are START & END\n    wordVecIds = {'START': 0, 'END': 1}\n    wordVecWords = ['START', 'END']\n\n# Counter to count how many times a word occurs in our Corpus\n    word_idx_count = {\n        0: float('inf'), #Start & End are infinite counts\n        1: float('inf'),\n    }\n\n    for sentence in sentences:\n        indexed_sentence = []\n        for word in sentence:\n            word = word.lower()\n            if word not in wordVecIds:\n                wordVecWords.append(word) # append the new word to the vocabulary word list\n                wordVecIds[word] = i # assign the next free id to the new word\n                i += 1\n\n            # keep track of counts of a word for later sorting\n            idx = wordVecIds[word] #index of a word\n            word_idx_count[idx] = word_idx_count.get(idx,0) + 1 \n\n            indexed_sentence.append(idx) # Building the numbered sentences\n        indexed_sentences.append(indexed_sentence) \n\n# restrict vocab size\n    # set all the words I want to keep to infinity\n    # so that they are included when the words are sorted and filtered by count \n    # common words\n    for word in keep_words:\n        word_idx_count[wordVecIds[word]] = float('inf')\n\n\n    \n    sorted_word_idx_count = sorted(word_idx_count.items(), key=lambda elem : elem[1], reverse=True) #Sorting in descending order\n    word2idx_small = {}\n    new_idx = 0\n    idx_new_idx_map = {} #Updating the ids of words in descending order to word vector in ID Number format\n    for idx, count in sorted_word_idx_count[:n_vocab]: #for words up to the user defined number of words\n        word = wordVecWords[idx]\n        #print(word, count)\n        word2idx_small[word] = new_idx\n        idx_new_idx_map[idx] = new_idx\n        new_idx += 1\n    # let 'unknown' be the last word\n    word2idx_small['UNKNOWN'] = new_idx \n    unknown = new_idx\n\n    assert('START' in word2idx_small)\n    assert('END' in word2idx_small)\n    for word in keep_words:\n        assert(word in word2idx_small)\n\n    # map old idx to new idx\n    sentences_small = []\n    for sentence in indexed_sentences:\n        if len(sentence) > 1:\n            new_sentence = [idx_new_idx_map[idx] if idx in idx_new_idx_map else unknown for idx in sentence]\n            sentences_small.append(new_sentence)\n\n    return sentences_small, word2idx_small\n\n\ndef get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=1):\n    # structure of bigram probability matrix will be:\n    # (last word, current word) --> probability\n    # we will use add-k smoothing (add-1 by default)\n    # note: we'll always ignore this from the END token\n    bigram_probs_matrix = np.ones((V, V)) * smoothing # Matrix of 1's times smoothing; V is the number of words in the word vector. 
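\n    # with add-k smoothing every count starts at k, so an unseen bigram (a, b) still\n    # gets P(b|a) = k / (k*V + N_a), where N_a is the number of observed bigrams starting at a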
\n    for sentence in sentences:\n        for i in range(len(sentence)):\n            \n            if i == 0:\n                # beginning word\n                bigram_probs_matrix[start_idx, sentence[i]] += 1 # Count of Start w(t)\n            else:\n                # middle word\n                bigram_probs_matrix[sentence[i-1], sentence[i]] += 1 # Counting occurrences of w(t-1) w(t)\n            # if we're at the final word\n            # we update the bigram for last -> current\n            # AND current -> END token\n            if i == len(sentence) - 1: \n                # final word \n                bigram_probs_matrix[sentence[i], end_idx] += 1 # Count of w(t) End\n\n    # normalize the counts along the rows to get probabilities\n    bigram_probs_matrix /= bigram_probs_matrix.sum(axis=1, keepdims=True) # divide each row by its sum so that\n    # p(w(t) | w(t-1)) = count(w(t-1), w(t)) / sum over all w of count(w(t-1), w), giving the probability matrix\n    return bigram_probs_matrix\n\nif __name__ == '__main__':\n    # load in the data\n    # note: sentences are already converted to sequences of word indexes\n    # note: you can limit the vocab size if you run out of memory\n    sentences, wordVecIds = get_sentences_with_wordVecIds_limit_vocab(10000)\n    # sentences, word2idx = get_sentences_with_word2idx()\n\n    # vocab size\n    V = len(wordVecIds)\n    #print(\"Vocab size:\", V)\n    # we will also treat beginning of sentence and end of sentence as bigrams\n    # START -> first word\n    # END -> last word \n    #Defining the start and end index numbers\n    start_idx = wordVecIds['START']\n    end_idx = wordVecIds['END']\n\n# Markov Model\n    # a matrix where:\n    # row = last word\n    # col = current word\n    # value at [row, col] = p(current word | last word)\n    bigram_probs_matrix = get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=0.1)\n    # a function to calculate normalized log prob score for a user defined sentence\n    def get_score(sentence):\n        score = 0\n        for i in range(len(sentence)):\n            if i == 0:\n                # For the first word, add the log probability of (START -> first word) to the score.\n                score += np.log(bigram_probs_matrix[start_idx, sentence[i]])\n            else:\n                # Do the same for all words in the sentence till the final word\n                # middle word\n                score += np.log(bigram_probs_matrix[sentence[i-1], sentence[i]])\n        # final word\n        score += np.log(bigram_probs_matrix[sentence[-1], end_idx])\n\n        # normalize the final score by dividing by the number of words + 1\n        return score / (len(sentence) + 1)\n\n\n    # a function to map word indexes back to real words\n    idx2word = dict((v, k) for k, v in iteritems(wordVecIds))\n    def get_words(sentence):\n        return ' '.join(idx2word[i] for i in sentence)\n\n# Generating some fake sentences randomly\n\n    # when we generate fake sentence, we want to ensure not to use\n    # start -> word or end -> word combinations\n    sample_probs = np.ones(V) # Matrix of ones\n    sample_probs[start_idx] = 0 #Set start and end probabilities to zero\n    sample_probs[end_idx] = 0\n    sample_probs /= sample_probs.sum()\n\n\n# User Interface\n\n    # test our model on real and fake sentences\n    while True:\n        \n        # input your own sentence\n        custom = input(\"Enter your own sentence:\\n\\n\")\n        custom = custom.lower().split()\n\n        # check that all words exist in wordVecIds (otherwise, we can't get score)\n        bad_sentence = False\n        for word in custom:\n            if word not in wordVecIds:\n                bad_sentence = True\n\n        if bad_sentence:\n            print(\"Sorry, you entered words that are not in the Corpus Vocabulary\")\n        else:\n            # convert sentence into list of indexes\n            custom = [wordVecIds[word] for word in custom]\n            print(\"SCORE:\", get_score(custom))\n            if get_score(custom)>(-8.2): # Decided 
on this number after various trials - could possibly write a classification model for this. \n                print(\"REAL SENTENCE\")\n            else:\n                print(\"NOT A REAL SENTENCE\")\n\n\n        cont = input(\"Continue? [Y/n]\")\n        if cont and cont.lower() in ('N', 'n'):\n            print(\"Thank You!\")\n            break\n    \n\n","repo_name":"NishantD6/Check-if-sentence-is-correct-or-not","sub_path":"NDas_DP_HMM_NLP.py","file_name":"NDas_DP_HMM_NLP.py","file_ext":"py","file_size_in_byte":9669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11092928164","text":"# -*-coding:utf-8-*-\n__author__ = 'Sunny'\n\nimport imaplib\nimport email\nimport re\nfrom datetime import datetime, timedelta\nimport time\nimport requests\n\nfpath = '***'\n\ndef send_notice(text):\n    line_url = \"***\"\n    params = {\"message\": \"{}\".format(text)}\n    r = requests.post(line_url, headers=headers, params=params)\n    # print(r.status_code)  # 200\n\n    params = {\"message\": \"{}\".format(text)}\n    r2 = requests.post(line_url, headers=headers2, params=params)\n\n\ndef month(m):\n    # minimal helper (assumed): month() is called below but was not defined in this\n    # file; map an English month abbreviation (e.g. 'Aug') to a zero-padded number\n    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n    return '{:02d}'.format(months.index(m) + 1)\n\n\ndef main():\n    # credentials\n    username = \"***\"\n\n    # generated app password\n    app_password = \"***\"\n\n    # https://www.systoolsgroup.com/imap/\n    gmail_host = 'imap.gmail.com'\n\n    # set connection\n    mail = imaplib.IMAP4_SSL(gmail_host)\n\n    # login\n    mail.login(username, app_password)\n\n    # select inbox\n    mail.select(\"INBOX\")\n\n    # select specific mails\n    _, selected_mails = mail.search(None, '(FROM \"***\")')\n\n    # total number of mails from specific user\n    # print(\"Total Messages from noreply@tradingview.com:\", len(selected_mails[0].split()))\n    count = 0\n    current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n    print(current_time, 'Scan Loop')\n    for num in reversed(selected_mails[0].split()):\n        if count < 30:\n            _, data = mail.fetch(num, '(RFC822)')\n            _, bytes_data = data[0]\n\n            # convert the byte data to message\n            email_message = email.message_from_bytes(bytes_data)\n            # email_message = email.message_from_string(bytes_data)\n            print(\"===========================================\")\n\n            # access data\n            # print(\"Subject: \", email_message[\"subject\"])\n            # print(\"To:\", email_message[\"to\"])\n            # print(\"From: \", email_message[\"from\"])\n            # print(\"Date: \", email_message[\"date\"])\n            # print(re.split(r'[^\\S\\n\\r]+', email_message[\"date\"].strip()))\n\n            d = re.split(r'[^\\S\\n\\r]+', email_message[\"date\"].strip())[1]\n            m = re.split(r'[^\\S\\n\\r]+', email_message[\"date\"].strip())[2]\n            m = month(m)\n            y = re.split(r'[^\\S\\n\\r]+', email_message[\"date\"].strip())[3]\n            t = re.split(r'[^\\S\\n\\r]+', email_message[\"date\"].strip())[4]\n            u = re.split(r'[^\\S\\n\\r]+', email_message[\"date\"].strip())[5]\n\n            date_str = '{}-{}-{} {}'.format(y, m, d, t)\n            date = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')\n            date_utc8 = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') + timedelta(hours=8)\n            print(date_utc8)\n\n            for part in email_message.walk():\n                if part.get_content_type() == \"text/plain\" or part.get_content_type() == \"text/html\":\n                    message = part.get_payload(decode=True)\n                    \n                    usdttype = re.search('Your (.*)USDT', message.decode())\n                    signal = re.search(';\">(.*)

', message.decode())\n print(usdttype.group(1), signal.group(1), count)\n print(\"==========================================\\n\")\n logline = str(date_utc8) + ' ' + usdttype.group(1) + ' ' + signal.group(1) + '\\n'\n\n with open(fpath, 'r') as file:\n content = file.read()\n if logline in content:\n file.close()\n else:\n send_notice(logline)\n f = open(fpath, 'a')\n f.write(logline)\n f.close()\n break\n count = count + 1\n else:\n print('Scan Completed')\n print(' ')\n print(' ')\n break\n time.sleep(10)\n # break #\n # break #\n\n\nif __name__ == \"__main__\":\n while 1:\n try:\n main()\n except Exception as e:\n print(e)\n time.sleep(5)\n","repo_name":"wttang109/autoscan","sub_path":"gmail_tv_imap.py","file_name":"gmail_tv_imap.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6492743020","text":"from telebot import TeleBot, types\nfrom flask import Flask, request, abort\nfrom time import sleep\n\nTOKEN = ''\nSECRET = '646f3ba8-7073-43de-ba41-ecc73250cbfd'\nWEBHOOK_HOST = \"https://gmwcbot.pythonanywhere.com/\" # do not forget to change username!\nWEBHOOK_URL = WEBHOOK_HOST + SECRET\n\nbot = TeleBot(TOKEN, threaded=False)\n\nbot.remove_webhook()\nsleep(1)\nbot.set_webhook(url=WEBHOOK_URL, max_connections=1) # use lower values to limit the load on your bot’s server\n\napp = Flask(__name__)\n\n\n@app.route('/{}'.format(SECRET), methods=[\"POST\"])\ndef telegram_webhook():\n if request.headers.get('content-type') == 'application/json':\n # json_string = request.get_data().decode('utf-8')\n json_string = request.stream.read().decode('utf-8')\n update = types.Update.de_json(json_string)\n bot.process_new_updates([update])\n return 'ok', 200\n else:\n abort(403)\n\n\n# Empty webserver index, return nothing, just HTTP 200\n@app.route('/')\ndef index():\n return 'Hello from Flask!'\n\n\n# Bot program code ...\n@bot.message_handler(commands=['start'])\ndef welcome(message):\n bot.reply_to(message, 'Hello')\n","repo_name":"pkmnxprss/gm-with-cats","sub_path":"deploy-pythonanywhere/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7535368193","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom os import environ, path, makedirs, execvp\nfrom time import sleep\nimport fcntl\nimport jinja2\nimport logging\nimport re\nimport socket\nimport struct\nimport sys\n\nLOG_LEVEL = environ.get('LOG_LEVEL', 'INFO').upper()\nZONE_TEMPLATE = environ.get(\"ZONE_TEMPLATE\", \"./zone.j2\")\nNAMED_TEMPLATE = environ.get(\"NAMED_TEMPLATE\", \"./named.conf.j2\")\nNAMED_OUTPUT = environ.get(\"NAMED_RENDERED\", \"/data/bind/etc/named.conf\")\nTTL = environ.get(\"ZONE_TTL\", \"10s\")\nNS = environ.get(\"ZONE_NS\", \"ns\")\nDOMAIN = environ[\"ZONE_DOMAIN\"] # Domain without leading dot\nHOSTS = environ[\"ZONE_HOSTS\"].split() # Space seperated list\nZONE_OUTPUT = environ.get(\"ZONE_RENDERED\", \"/data/bind/etc/{}.zone\".format(DOMAIN))\n\n# Dummy socket used for fcntl functions\n_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nlogging.basicConfig(level=LOG_LEVEL, format=\"[%(levelname)s %(asctime)s] %(message)s\", datefmt=\"%m-%d %H:%M:%S\")\nlogger = logging.getLogger('zonerender')\n\ndef render(zone_template, **context):\n dirname, filename = path.split(zone_template)\n env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(dirname or './')\n )\n return 
env.get_template(filename).render(**context)\n\ndef writefile(filepath, text):\n makedirs(path.dirname(filepath), exist_ok=True)\n with open(filepath, 'w') as f:\n f.write(text)\n\ndef _ifctl(ifname, code):\n if isinstance(ifname, str):\n ifname = ifname.encode('utf-8')\n\n return fcntl.ioctl(\n _socket.fileno(),\n code,\n struct.pack('256s', ifname[:15])\n )\n\ndef ifaddr(ifname):\n return '.'.join(str(x) for x in _ifctl(ifname, 0x8915)[20:24]) # SIOCGIFADDR\n\ndef parsehosts(hosts):\n result = dict()\n for host in hosts:\n m = re.fullmatch(r'([\\w.]+)=([0-9.]+)', host)\n if m is None:\n raise ValueError(\"host entry '{}' is not valid; must be =\".format(host))\n name, address = m.group(1, 2)\n result[name] = address\n return result\n\nif __name__ == \"__main__\":\n hosts = parsehosts(HOSTS)\n if NS not in hosts:\n hosts[NS] = ifaddr(\"eth0\")\n\n # Render the zone file\n zone_text = render(ZONE_TEMPLATE, ns=NS, hosts=hosts, domain=DOMAIN, ttl=TTL)\n writefile(ZONE_OUTPUT, zone_text)\n logging.info(\"Rendered zone file for %s\", DOMAIN)\n logging.debug(zone_text)\n\n # Render the named.conf file\n named_text = render(NAMED_TEMPLATE, domain=DOMAIN)\n writefile(NAMED_OUTPUT, named_text)\n logging.info(\"Rendered named.conf\")\n logging.debug(named_text)\n\n # Exec the next program\n logging.info(\"Done rendering zone files. Now running '%s'\" % \" \".join(sys.argv[1:]))\n if len(sys.argv) > 1:\n execvp(sys.argv[1], sys.argv[1:])","repo_name":"teambi0s/InCTFi","sub_path":"2021/Network Pentest/Home Drive/server/ns/zonerender.py","file_name":"zonerender.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"21"} +{"seq_id":"33763199231","text":"from typing import Optional\n\nfrom rdkit.Chem.rdchem import Mol\nfrom rdkit.Chem.rdmolfiles import MolToSmiles\n\n\nclass InvalidSmiles(ValueError):\n def __init__(self, smiles: str, msg: Optional[str] = None):\n if msg is None:\n msg = f'\"{smiles}\" is not a valid SMILES string'\n super().__init__(msg)\n self.smiles = smiles\n\n\nclass InvalidInchi(ValueError):\n \"\"\"\n Exception raised when converting invalid InChI strings.\n \"\"\"\n\n def __init__(self, inchi: str):\n super().__init__(f\"The following InChI string cannot be converted: {inchi}\")\n self.inchi = inchi\n\n\nclass InvalidReactionSmiles(InvalidSmiles):\n def __init__(self, reaction_smiles: str, msg: Optional[str] = None):\n if msg is None:\n msg = f'\"{reaction_smiles}\" is not a valid reaction SMILES string'\n super().__init__(smiles=reaction_smiles, msg=msg)\n\n\nclass InvalidMdl(ValueError):\n \"\"\"\n Exception raised when converting invalid MDL Mol strings.\n \"\"\"\n\n def __init__(self, mdl: str):\n super().__init__(f\"The following MDL string cannot be converted: {mdl}\")\n self.mdl = mdl\n\n\nclass SanitizationError(ValueError):\n def __init__(self, mol: Mol):\n message = \"Error when sanitizing RDKit Mol\"\n try:\n smiles = MolToSmiles(mol)\n message += \": \" + smiles\n except Exception:\n pass\n super().__init__(message)\n\n\nclass UnclearWhetherTokenized(ValueError):\n \"\"\"Exception raised when unclear if something was tokenized or not.\"\"\"\n\n def __init__(self, string: str):\n super().__init__(f'Cannot determine if \"{string}\" is 
tokenized.')\n","repo_name":"rxn4chemistry/rxn-chemutils","sub_path":"src/rxn/chemutils/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"}
{"seq_id":"10997123894","text":"import scipy.io.wavfile as waves\nfrom scipy.fft import fft\nimport scipy.fftpack as fftpk\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tkinter import messagebox\n\ndef fase2():\n    try:\n        audio='Sound.wav'\n        fsonido, sonido = waves.read(audio)\n\n        # Extract a single channel if the audio is stereo\n        canales=sonido.shape\n        cuantos=len(canales)\n        canal = 0 \n        if (cuantos==1): # Mono\n            uncanal=sonido\n        if (cuantos>=2): # Stereo\n            uncanal=sonido[:,canal]\n\n        # Phase of the original audio in the frequency domain\n        N=len(uncanal)\n        f=fft(uncanal)\n        Y=np.unwrap(np.angle(f))\n        X=(np.arange(0,1-1/N,1/N))*fsonido\n\n        # Graphical output\n        plt.plot(X[range(N//2)], Y[range(N//2)])\n        plt.title('Fase 2 con Dominio en la Frecuencia')\n        plt.xlabel(\"Frecuencia (Hz)\")\n        plt.ylabel(\"Amplitud\")\n        plt.show()\n    except:\n        messagebox.showwarning(\"Error\", \"El audio no existe, por favor grabe uno desde la app\")","repo_name":"Omar211219/Proyecto-Telecomunicaciones","sub_path":"Fase2.pyw","file_name":"Fase2.pyw","file_ext":"pyw","file_size_in_byte":1042,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16026668094","text":"# Writing numbers in a row, S3, implementation-greedy\n\nfrom sys import stdin\n\nA = stdin.readline().rstrip()\n\n# Starting from 1, keep increasing the number until the digits of A have all been matched one by one\n\nnum = 0\n\nwhile 1:\n    num += 1\n    s = str(num)\n\n    while len(s) > 0 and len(A) > 0:\n        if s[0] == A[0]: # if the leading characters match, remove them\n            A = A[1:]\n        s = s[1:]\n\n    if A == '':\n        print(num)\n        break\n","repo_name":"lookinmin/CodingTest","sub_path":"Greedy/BOJ_1515.py","file_name":"BOJ_1515.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16365357837","text":"#This function converts degrees from Celsius to Fahrenheit\ndef celsius_to_fahrenheit(temperature_input):\n    return (temperature_input * (9/5)) + 32\n\n\n#This function computes the windchill based on the degrees in Fahrenheit and the hypothetical wind speed\ndef compute_windchill(temperature_final, current_wind_speed):\n    windchill = 35.74 + (0.6215 * (temperature_final)) - (35.75 * (current_wind_speed**0.16)) + (0.4275 * (temperature_final)) * (current_wind_speed**0.16)\n    return windchill\n\nloop2 = False\nprint(\"What is the temperature?\")\ntemperature_input = float(input(\">> \"))\nwhile not loop2:\n    print(\"Fahrenheit or Celsius? 
(F/C)\")\n degree_type = input(\">> \")\n if degree_type.lower() == \"c\":\n temperature_final = celsius_to_fahrenheit(temperature_input)\n loop2 = True\n elif degree_type.lower() == \"f\":\n temperature_final = temperature_input\n loop2 = True\n else:\n print(\"That is an invalid unit, try again.\\n\")\n\ncurrent_wind_speed = 0\nwhile current_wind_speed != 60:\n current_wind_speed += 5\n windchill = compute_windchill(temperature_final, current_wind_speed)\n print(f\"At temperature {temperature_final:.1f}F, and wind speed at {current_wind_speed:.0f} mph, the windchill is: {windchill:.2f}F\")\n\n","repo_name":"ethan161/CSE111","sub_path":"Wind_chill_functions.py","file_name":"Wind_chill_functions.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29981295776","text":"import binascii\nimport hashlib\nimport logging\nimport os\nimport time\n\nimport regex\nimport socket\nimport threading\nimport paho.mqtt.client as mqtt\n\nfrom own_frame_command import OWNFrameCommand\nfrom own_frame_monitor import OWNFrameMonitor\n\n\nclass OpenWebNet:\n def __init__(self, options):\n self.logger = logging.getLogger(\"own2mqtt\")\n\n self.own_server_address = (options['own_server_ip'], options['own_server_port'])\n self.own_password = options['own_server_password']\n self.ACK = b'*#*1##'\n self.NACK = b'*#*0##'\n self.A_HEX = '736F70653E'\n self.B_HEX = '636F70653E'\n self.AUTH_START = b'*98*2##'\n self.SET_COMMAND = b'*99*0##'\n self.SET_MONITOR = b'*99*1##'\n self.KEEP_ALIVE = b'*#13**22##'\n\n self.mqtt_client = None\n self.mqtt_server_ip = options['mqtt_server_ip']\n self.mqtt_server_port = options['mqtt_server_port']\n self.mqtt_client_name = options['mqtt_client_name']\n self.mqtt_server_user = options['mqtt_server_user']\n self.mqtt_server_password = options['mqtt_server_password']\n self.mqtt_base_topic = options['mqtt_base_topic']\n\n self.thermo_zones = {}\n for thermo_zone in options['thermo_zones']:\n self.thermo_zones[str(thermo_zone)] = {}\n self.query_interval = options['query_interval']\n self.f520_ids = options['f520_ids']\n self.f522_ids = options['f522_ids']\n self.debug = options['debug']\n\n self.command_thread = self.monitor_thread = None\n self.command_socket = self.monitor_socket = None\n\n self.mqtt_ready = self.monitor_ready = self.command_ready = False\n\n def run(self):\n try:\n self.logger.info('Connecting to MQTT Server %s:%s', self.mqtt_server_ip, self.mqtt_server_port)\n self.mqtt_client = mqtt.Client(self.mqtt_client_name, True, {'base_topic': self.mqtt_base_topic, 'own_instance': self})\n self.mqtt_client.username_pw_set(self.mqtt_server_user, self.mqtt_server_password)\n self.mqtt_client.on_connect = self.on_mqtt_connect\n self.mqtt_client.on_disconnect = self.on_mqtt_disconnect\n self.mqtt_client.on_message = self.on_mqtt_message\n self.mqtt_client.connect(self.mqtt_server_ip, self.mqtt_server_port)\n self.mqtt_client.loop_start()\n\n self.monitor_thread = threading.Thread(target=self.monitor_start)\n self.monitor_thread.start()\n\n while not (self.mqtt_ready and self.monitor_ready and self.command_ready):\n self.command_start()\n\n # self.monitor_thread.join()\n # self.command_thread.join()\n\n except (KeyboardInterrupt, SystemExit):\n self.mqtt_client.loop_stop()\n self.mqtt_client.disconnect()\n self.monitor_socket.close()\n self.monitor_ready = False\n self.command_socket.close()\n\n def monitor_start(self):\n logger = logging.getLogger(\"own2mqtt.monitor\")\n while 
True:\n if self.mqtt_ready:\n self.monitor_ready = False\n try:\n self.logger.info('Starting MONITOR session with %s', self.own_server_address)\n\n self.monitor_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.monitor_socket.connect(self.own_server_address)\n data_received = self.monitor_socket.recv(4096)\n\n if data_received == self.ACK:\n self.monitor_socket.send(self.SET_MONITOR)\n\n data_received = self.monitor_socket.recv(4096)\n\n if data_received == self.AUTH_START:\n self.monitor_socket.send(self.ACK)\n\n if self.__authenticate(self.monitor_socket):\n last_frame = time.time()\n self.logger.info('MONITOR started')\n self.monitor_ready = True\n\n # Monitor each frame in socket\n while self.monitor_ready and (time.time() - last_frame) < 30:\n frames = self.read_monitor_socket()\n for frame in frames:\n last_frame = time.time()\n OWNFrameMonitor(frame, self)\n self.mqtt_client.publish(f'{self.mqtt_base_topic}/last_frame', payload=frame, qos=0, retain=False)\n else:\n self.monitor_ready = False\n self.logger.info('MONITOR Disconnected')\n except Exception as e:\n self.logger.info(e)\n if self.debug:\n if self.monitor_socket:\n self.monitor_socket.close()\n self.monitor_socket = None\n raise e\n time.sleep(5)\n\n def command_connect(self):\n self.logger.info('Starting COMMAND session with %s', self.own_server_address)\n\n self.command_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.command_socket.connect(self.own_server_address)\n data_received = self.command_socket.recv(4096)\n\n if data_received == self.ACK:\n self.command_socket.send(self.SET_COMMAND)\n\n data_received = self.command_socket.recv(4096)\n\n if data_received == self.AUTH_START:\n self.command_socket.send(self.ACK)\n\n if self.__authenticate(self.command_socket):\n self.logger.info('COMMAND started')\n self.command_ready = True\n\n def command_start(self):\n self.command_connect()\n\n # Send command requests\n self.write_socket(b'*#1*0##')\n\n for thermo_zone in self.thermo_zones.keys():\n self.write_socket(('*#4*%s##' % thermo_zone).encode())\n self.write_socket(('*#4*%s*60##' % thermo_zone).encode())\n\n self.total_energy_query()\n self.f522_start_power_request()\n\n frames = self.read_command_socket()\n self.logger.info(frames)\n if len(frames) > 0:\n last_frame = time.time()\n else:\n self.logger.info('COMMAND Disconnected')\n\n def write_socket(self, encoded_frame):\n while True:\n try:\n self.command_socket.send(encoded_frame)\n self.logger.debug('TX: %s' % encoded_frame.decode())\n break\n except (BrokenPipeError, IOError) as e:\n self.command_socket.close()\n self.command_socket = None\n self.command_ready = False\n self.command_connect()\n self.write_socket(encoded_frame)\n\n def f522_start_power_request(self):\n for (f522_id) in self.f522_ids:\n self.write_socket(f'*#18*7{f522_id}#0*#1200#1*1##'.encode())\n\n def total_energy_query(self):\n for (f520_id) in self.f520_ids:\n self.write_socket(f'*#18*5{f520_id}*51##'.encode())\n self.write_socket(f'*#18*5{f520_id}*53##'.encode())\n self.write_socket(f'*#18*5{f520_id}*54##'.encode())\n threading.Timer(self.query_interval['total_energy_query'], self.total_energy_query).start()\n\n def read_command_socket(self):\n return self.__read_socket(self.command_socket)\n\n def read_monitor_socket(self):\n return self.__read_socket(self.monitor_socket)\n\n def __authenticate(self, current_socket):\n self.logger.info('Authenticating...')\n rb_hex = self.__create_rb_hex()\n rb = self.__hex_to_decimal_string(rb_hex)\n ra_search = regex.search(\n 
r'\\*#(\\d{128})##', current_socket.recv(4096).decode())\n if not ra_search:\n return False\n ra_hex = self.__decimal_string_to_hex(ra_search.group(1))\n kab_hex = hashlib.sha256(self.own_password.encode()).hexdigest()\n\n client_hash = hashlib.new('sha256')\n client_hash.update(ra_hex.encode())\n client_hash.update(rb_hex.encode())\n client_hash.update(self.A_HEX.encode())\n client_hash.update(self.B_HEX.encode())\n client_hash.update(kab_hex.encode())\n client_digest = client_hash.hexdigest()\n client_digest_dec = self.__hex_to_decimal_string(client_digest)\n\n client_message = \"*#%s*%s##\" % (rb, client_digest_dec)\n current_socket.send(client_message.encode())\n\n server_hash = hashlib.new('sha256')\n server_hash.update(ra_hex.encode())\n server_hash.update(rb_hex.encode())\n server_hash.update(kab_hex.encode())\n server_digest = server_hash.hexdigest()\n server_digest_dec = self.__hex_to_decimal_string(server_digest)\n server_message = \"*#%s##\" % server_digest_dec\n\n if current_socket.recv(4096) == server_message.encode():\n current_socket.send(self.ACK)\n self.logger.info('Authenticated')\n return True\n return False\n\n @staticmethod\n def on_mqtt_connect(client, userdata, flags, rc):\n logger = logging.getLogger(\"own2mqtt\")\n logger.info('Connected to MQTT')\n userdata['own_instance'].mqtt_ready = True\n\n topics = [\n (f'{userdata[\"base_topic\"]}/command_frame', 0),\n (f'{userdata[\"base_topic\"]}/who-1/+/command', 0),\n (f'{userdata[\"base_topic\"]}/who-2/+/command', 0),\n (f'{userdata[\"base_topic\"]}/who-2/+/set_position', 0),\n (f'{userdata[\"base_topic\"]}/who-4/zones/+/mode/set', 0),\n (f'{userdata[\"base_topic\"]}/who-4/zones/+/temperature/set', 0),\n ]\n client.subscribe(topics)\n\n @staticmethod\n def on_mqtt_disconnect(client, userdata, rc):\n logger = logging.getLogger(\"own2mqtt\")\n logger.info('MQTT Disconnected')\n userdata['own_instance'].mqtt_ready = False\n\n @staticmethod\n def on_mqtt_message(client, userdata, message):\n logger = logging.getLogger(\"own2mqtt\")\n logger.debug('MQTT: TOPIC: %s | PAYLOAD: %s', message.topic, message.payload)\n OWNFrameCommand(userdata['own_instance'], message.topic, message.payload)\n\n @staticmethod\n def __read_socket(current_socket):\n data_received = ''\n while not data_received.endswith('##'):\n data_received = data_received + current_socket.recv(1).decode()\n return regex.findall(r\"\\*#?[\\d\\*]*#?0?[\\d\\*]+#?[\\d\\*]*##\", data_received)\n\n @staticmethod\n def __create_rb_hex():\n return binascii.hexlify(os.urandom(32)).decode()\n\n @staticmethod\n def __decimal_string_to_hex(s):\n hex_string = ''\n for (chars) in regex.findall('..', s):\n hex_string += hex(int(chars))[2:]\n return hex_string\n\n @staticmethod\n def __hex_to_decimal_string(h):\n dec_string = ''\n for (chars) in regex.findall('.', h):\n dec_string += '{0:02d}'.format(int(chars, 16))\n return dec_string\n","repo_name":"superinj/addon-own2mqtt","sub_path":"own2mqtt/rootfs/openwebnet.py","file_name":"openwebnet.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30530195745","text":"import matplotlib.pyplot as plt\nimport csv\n\n\n\ndef main():\n x=[]\n y = []\n data = csv.reader(open(\"pts.log\"))\n for l in data:\n a,b = map(float, l)\n x.append(a)\n y.append(b)\n\n plt.plot(x,y)\n plt.show()\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"yhoazk/CarND-Path-Planning-Project","sub_path":"helper/spline_test/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32146137922","text":"N = (-1, 0)\nS = (1, 0)\nW = (0, -1)\nE = (0, 1)\nWAIT = (0, 0)\n\n\ndef parse(path):\n with open(path) as f:\n data = [list(l.strip()[1:-1]) for l in f.readlines()][1:-1]\n\n n = len(data)\n m = len(data[0])\n grid = [[[] for _ in range(m)] for _ in range(n)]\n\n for r, l in enumerate(data):\n for c, s in enumerate(l):\n if s == \"^\":\n grid[r][c].append(N)\n elif s == \"v\":\n grid[r][c].append(S)\n elif s == \">\":\n grid[r][c].append(E)\n elif s == \"<\":\n grid[r][c].append(W)\n\n return grid\n\n\ndef print_blizzards(blizzards):\n res = \"\"\n smap = {N: \"^\", S: \"v\", E: \">\", W: \"<\"}\n for _, l in enumerate(blizzards):\n for _, b in enumerate(l):\n blen = len(b)\n if blen == 0:\n res += \".\"\n elif blen > 1:\n res += str(blen) if blen < 10 else \"!\"\n else:\n res += smap[list(b)[0]]\n\n res += \"\\n\"\n print(res)\n\n\ndef evolve(blizzards):\n n = len(blizzards)\n m = len(blizzards[0])\n next_blizzard = [[[] for _ in range(m)] for _ in range(n)]\n for i in range(0, n):\n for j in range(0, m):\n for b in blizzards[i][j]:\n i_n, j_n = (i + b[0]) % n, (j + b[1]) % m\n next_blizzard[i_n][j_n].append(b)\n return next_blizzard\n\n\ndef part1(blizzards):\n n = len(blizzards)\n m = len(blizzards[0])\n start = (-1, 0)\n stop = (n, m - 1)\n\n return bfs_helper(blizzards, start, stop)\n\n\ndef part2(blizzards):\n n = len(blizzards)\n m = len(blizzards[0])\n start = (-1, 0)\n stop = (n, m - 1)\n t0 = bfs_helper(blizzards, start, stop, 0)\n t1 = bfs_helper(blizzards, stop, start, t0)\n t2 = bfs_helper(blizzards, start, stop, t1)\n\n return t2\n\n\ndef to_tuple(x):\n if isinstance(x, list):\n return tuple([to_tuple(v) for v in x])\n else:\n return x\n\n\nblizzard_cache = {}\ndef precalc_blizzard_states(blizzards):\n key = to_tuple(blizzards)\n if key in blizzard_cache:\n return blizzard_cache[key]\n\n blizzards_state = [blizzards]\n while len(blizzards_state) <= 2 or blizzards_state[0] != blizzards_state[-1]:\n blizzards_state.append(evolve(blizzards_state[-1]))\n blizzard_cache[key] = (blizzards_state, len(blizzards_state) - 1)\n\n return blizzard_cache[key]\n\n\ndef bfs_helper(blizzards, start, stop, t0=0):\n n = len(blizzards)\n m = len(blizzards[0])\n\n blizzards_state, bn = precalc_blizzard_states(blizzards)\n\n directions = [S, E, WAIT, W, N]\n\n q = [(t0, start)]\n explored = set((t0, start))\n\n while q:\n t, loc = q.pop(0)\n row, col = loc\n\n if (\n 0 <= row < n\n and 0 <= col\n and blizzards_state[t % bn][loc[0]][loc[1]]\n ):\n continue\n\n if loc == stop:\n return t\n\n for d in directions:\n next_row, next_col = row + d[0], col + d[1]\n if not (0 <= next_row < n and 0 <= next_col < m or (next_row, next_col) in (start, stop)):\n continue\n state = (t + 1, (next_row, next_col))\n if state not in explored:\n explored.add(state)\n q.append(state)\n\n\ndef main():\n test_input_file = \"test_input.dat\"\n test_input = parse(test_input_file)\n assert part1(test_input) == 18\n\n input_file = \"input.dat\"\n input_data = parse(input_file)\n print(part1(input_data))\n\n assert part2(test_input) == 54\n print(part2(input_data))\n\n\nif __name__ == \"__main__\":\n 
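# run the sample-input assertions first, then print the part 1 and part 2 answers\n    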
main()\n","repo_name":"siggi84/advent_of_code_2022","sub_path":"24/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
{"seq_id":"29287246275","text":"# -*- coding: utf-8 -*-\n# @Author : liou\n\nimport math\nimport copy\nimport torch\nfrom torch import nn\nfrom .utils import LayerNorm\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom .attention import MultiHeadedAttention\nfrom .EncoderDecoder import EncoderDecoder, Encoder, Decoder, EncoderLayer, DecoderLayer, Generator\n\nclass PositionwiseFeedForward (nn.Module) :\n    \"\"\"The feed-forward part of the model.\n    The feed forward here is position-wise: the attention part produces a d_model-sized vector for every position, so the overall output is B * Length * d_model.\n    Position-wise means that the same transformation matrices are applied to every x, i.e. to every unit along the length dimension.\n\n    It is simply two fully connected layers with a ReLU in between.\n    Formula:\n    FFN (x) = max (0, xW1 + b1) W2 + b2\n    The hidden layer size defaults to d_ff = 2048, as in the paper.\n    \"\"\"\n    def __init__(self, d_model, d_ff, dropout = 0.1):\n        super(PositionwiseFeedForward, self).__init__()\n        self.w_1 = nn.Linear (d_model, d_ff)\n        self.w_2 = nn.Linear (d_ff, d_model)\n        self.dropout = nn.Dropout (dropout)\n\n    def forward (self, x) :\n        return self.w_2 (self.dropout (F.relu(self.w_1 (x))))\n\nclass Embedding (nn.Module) :\n    \"\"\"Embeds the input tokens.\n    Returns embeddings of length d_model, scaled by d_model^0.5.\n    \"\"\"\n    def __init__(self, d_model, vocab):\n        super(Embedding, self).__init__()\n        self.lut = nn.Embedding (vocab, d_model)\n        self.d_model = d_model\n\n    def forward (self, x) :\n        return self.lut (x) * math.sqrt(self.d_model)\n\n\nclass PositionalEncoding (nn.Module) :\n    \"\"\"Positional encoding.\n    See the paper for the details.\n    \"\"\"\n    def __init__(self, d_model, dropout, max_len = 500):\n        super(PositionalEncoding, self).__init__()\n        self.dropout = nn.Dropout (p = dropout)\n\n        pe = torch.zeros (max_len, d_model)\n        position = torch.arange(0, max_len).unsqueeze(1)\n        div_term = torch.exp (torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))\n\n        pe[:, 0 :: 2] = torch.sin (position * div_term)\n        pe[:, 1 :: 2] = torch.cos (position * div_term)\n        pe = pe.unsqueeze(0)\n\n        self.register_buffer ('pe', pe)\n\n    def forward (self, x) :\n        x = x + Variable (self.pe[:, : x.size(1)], requires_grad = False)\n        return self.dropout (x)\n\ndef make_model (src_vocab, tgt_vocab, N = 6, d_model = 512, d_ff = 2048, h = 8, dropout = 0.1) :\n    c = copy.deepcopy\n    attn = MultiHeadedAttention (h, d_model)\n    ff = PositionwiseFeedForward (d_model, d_ff, dropout)\n    position = PositionalEncoding (d_model, dropout)\n    model = EncoderDecoder (\n        Encoder (EncoderLayer (d_model, c(attn), c(ff), dropout), N),\n        Decoder (DecoderLayer (d_model, c(attn), c(ff), dropout), N),\n        nn.Sequential (Embedding (d_model, src_vocab), c(position)),\n        nn.Sequential (Embedding (d_model, tgt_vocab), c(position)),\n        Generator (d_model, tgt_vocab)\n    )\n\n    for p in model.parameters () :\n        if p.dim () > 1 :\n            nn.init.xavier_uniform_(p)\n    return model\n","repo_name":"rm-rf-me/Transform_Learn","sub_path":"model/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73031010293","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport unittest\nimport sys\ntry:\n    from io import StringIO\nexcept:\n    from StringIO import StringIO\nfrom structural.facade import TestRunner, TC1, TC2, TC3\n\n\nclass TestRunnerFacilities(unittest.TestCase):\n\n    def setUp(self):\n        self.tc1 = TC1()\n        self.tc2 = TC2()\n        self.tc3 = TC3()\n        
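# expected stdout transcript for each test case; the assertions below compare captured output against these\n        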
self.average_result_tc1 = \"###### In Test 1 ######\\n\" + \\\n \"Setting up\\n\" + \\\n \"Running test\\n\" + \\\n \"Tearing down\\n\" + \\\n \"Test Finished\"\n self.average_result_tc2 = \"###### In Test 2 ######\\n\" + \\\n \"Setting up\\n\" + \\\n \"Running test\\n\" + \\\n \"Tearing down\\n\" + \\\n \"Test Finished\"\n self.average_result_tc3 = \"###### In Test 3 ######\\n\" + \\\n \"Setting up\\n\" + \\\n \"Running test\\n\" + \\\n \"Tearing down\\n\" + \\\n \"Test Finished\"\n self.runner = TestRunner()\n self.out = StringIO()\n self.saved_stdout = sys.stdout\n sys.stdout = self.out\n\n def tearDown(self):\n self.out.close()\n sys.stdout = self.saved_stdout\n\n def test_tc1_output(self):\n self.tc1.run()\n output = self.out.getvalue().strip()\n self.assertEqual(output, self.average_result_tc1)\n\n def test_tc2_output(self):\n self.tc2.run()\n output = self.out.getvalue().strip()\n self.assertEqual(output, self.average_result_tc2)\n\n def test_tc3_output(self):\n self.tc3.run()\n output = self.out.getvalue().strip()\n self.assertEqual(output, self.average_result_tc3)\n\n def test_bunch_launch(self):\n self.runner.runAll()\n output = self.out.getvalue().strip()\n self.assertEqual(output, str(self.average_result_tc1 + '\\n\\n' +\n self.average_result_tc2 + '\\n\\n' +\n self.average_result_tc3))\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/faif_python-patterns/python-patterns-master/tests/test_facade.py","file_name":"test_facade.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"35704997257","text":"import numpy as np\nimport pydicom\nfrom PIL import Image\nimport os\n\nimport json\n\nclass dicomData:\n def __init__(self, image_dicom):\n self.image_dicom = image_dicom\n self.dicom_metadata = {}\n self.simple_image_data = None\n\n def fetch_metadata(self):\n data = pydicom.filereader.dcmread(self.image_dicom, stop_before_pixels=True)\n self.dicom_metadata = data.to_json_dict() #convert metadata into the dictionary format\n\n def extract_simple_image(self):\n simple_image = pydicom.dcmread(self.image_dicom)\n simple_image = simple_image.pixel_array.astype(float)\n rescaled_image = (np.maximum(simple_image, 0) / simple_image.max()) * 255 #resclaing pixel values in range of (0,255)\n self.simple_image_data = np.uint8(rescaled_image)\n self.simple_image_data = Image.fromarray(self.simple_image_data)\n\n def save_image(self, output_folder):\n os.makedirs(output_folder, exist_ok=True)\n image_name = f\"{os.path.basename(self.image_dicom).split('/')[-1]}.png\"\n image_path = os.path.join(output_folder, image_name)\n self.simple_image_data.save(image_path) # save the image at given path\n\n def export_metadata_to_json(self, output_json):\n with open(\n f\"{output_json}/{os.path.basename(self.image_dicom).split('/')[-1]}.json\",\n \"w\",\n ) as json_file:\n json.dump(self.dicom_metadata, json_file, indent=2)\n\n#fetching all the files in the input folder and processing it one by one\ndef process_dicom_folder(input_folder, output_folder, json_output_file):\n for root, dirs, files in os.walk(input_folder):\n for file in files:\n if file.lower().endswith(\".dicom\"):\n dicom_file_path = os.path.join(root, file)\n dicom_image = dicomData(dicom_file_path)\n dicom_image.fetch_metadata()\n dicom_image.extract_simple_image()\n dicom_image.save_image(output_folder)\n dicom_image.export_metadata_to_json(json_output_file)\n\n\ninput_images_path = \"task1/input\"\noutput_images_path = 
\"task1/output\"\noutput_metadata_path = \"task1/metadata\"\n\n\nprocess_dicom_folder(input_images_path, output_images_path, output_metadata_path)\n","repo_name":"avneesh-jha/assignment","sub_path":"task1/medical_data.py","file_name":"medical_data.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73026946933","text":"from dataprep import data_prep\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--datapath\", required = True,\n help = \"path to where the face cascade resides\")\nargs = vars(ap.parse_args())\ndata_path = args[\"datapath\"]\n\ndata_prep(data_path)","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/RyanZotti_Self-Driving-Car/Self-Driving-Car-master/run_dataprep.py","file_name":"run_dataprep.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"8041330286","text":"from xml.etree import ElementTree\n\nfrom .linter import Linter\nfrom .report import Report\nfrom .utils import run_command\n\n\n__all__ = []\n\n\nDEFAULT_CONFIG = {\n # Filename filter rules\n 'filefilter': ['+ *.h', '+ *.h.in', '+ *.cpp', '+ *.c'],\n}\n\n\ndef lint(_config: dict, report: Report, numproc: int = 1, _fixit: bool = False):\n \"\"\"Lint with cppcheck.\n\n Parameters\n ----------\n _config\n Dictionary that contains the configuration for the linter.\n _config is not used by this linter.\n report\n Collection of filenames and corresponding messages.\n numproc\n The number of processors to use.\n _fixit\n When True, the linter will try to fix (a part of) the problems in each\n file.\n\n \"\"\"\n # Get version\n print('USING VERSION : {0}'.format(\n run_command(['cppcheck', '--version'], verbose=False)[0].strip()))\n\n if len(report.filenames) > 0:\n # Call Cppcheck\n command = (['cppcheck', '-j', str(numproc)] + report.filenames +\n ['-q', '--enable=all', '--language=c++', '--std=c++11', '--xml',\n '--suppress=missingIncludeSystem', '--suppress=unusedFunction'])\n xml_str = run_command(command)[1]\n etree = ElementTree.fromstring(xml_str)\n\n # Parse the output of Cppcheck into standard return values\n for error in etree:\n if 'file' not in error.attrib:\n continue\n # key = '{:<15} {:<40} {:<30}' % (error.attrib['severity'],\n # error.attrib['file'],\n # error.attrib['id'])\n text = '{} {} {}'.format(\n error.attrib['severity'], error.attrib['id'], error.attrib['msg'])\n lineno = int(error.attrib['line'])\n if lineno == 0:\n lineno = None\n report(error.attrib['file'], lineno, None, text)\n\n\nLINTER = Linter('cppcheck', lint, DEFAULT_CONFIG, language='cpp')\n","repo_name":"theochem/cardboardlint","sub_path":"cardboardlint/linter_cppcheck.py","file_name":"linter_cppcheck.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32505898616","text":"import os\nimport shutil\n\n# Define the source and destination folders\nroot_dir = r'D:\\Photos Eujin\\Eujin Database'\nsource_folders = [os.path.join(root_dir,dir) for dir in os.listdir(root_dir)[1:]]\ndestination_folder = r\"D:\\Photos Eujin\\Eujin Database\\1\"\n\n# Iterate over the source folders and move everything to the destination folder\nfor source_folder in source_folders:\n for root, dirs, files in os.walk(source_folder):\n for file in files:\n try:\n shutil.move(os.path.join(root,file), 
os.path.join(destination_folder, file))\n except Exception as e:\n print(e)\n\nprint(\"All files have been moved to the first folder.\")\n","repo_name":"eujin-eujin/pycharmprojects","sub_path":"automation.py","file_name":"automation.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40681120654","text":"'''\r\n\tHolds the classes for laser light control.\r\n\r\n\t#Laser Classes\r\n\r\n\t`Laser()` is just meant to group together `LaserBeam`s as part of the\r\n\tdatatypes it remembers. `LaserBeam()` is just meant to group together\r\n\tfrequency and intensity controller classes: `LaserIntensity()` and\r\n\t`LaserFrequency()`.\r\n\t\r\n\t##Desired Example Definitions\r\n\r\n\t```python\r\n\r\n\t#define the lasers\r\n\tgreen \t= GreenLaser()\r\n\tyellow\t= YellowLaser()\r\n\ttrap \t= TrapLaser()\r\n\t```\r\n\r\n\t##Desired Example Usage\r\n\t```python\r\n\t#setting the frequency does not automatically turn on the power\r\n\tgreen.probe.frequency.constant(t,'10 Mhz')\r\n\tgreen.probe.turnoff(t)\r\n\r\n\t#automatically checks to see if laser is off, if so turn on.\r\n\tgreen.probe.intensity.constant(t,'1 mW')\r\n\tgreen.probe.turnoff(t)\r\n\t```\r\n'''\r\nimport builtins\r\nfrom labscript.labscript import AnalogQuantity\r\n\r\nDEBUG = False\r\nmV = 1e-3\r\nms = 1e-3\r\n\r\nclass LaserFrequency(AnalogQuantity):\r\n\t'''\r\n\r\n\t\tYou actually don't need a special class for LaserFrequency. It's just of\r\n\t\ttype AnalogQuantity, so just pass the frequency channel to\r\n\t\t`LaserBeam(frequency_control)`.\r\n\t\r\n\t'''\r\n\tpass\r\n\r\nclass LaserIntensity():\r\n\t'''\r\n\t\t\r\n\t\tThis is a controller for dealing with the annoying details of turning a\r\n\t\tlaser on and off.\r\n\r\n\t\tAuto turnoff, turnon features assume sequential usage of the light power\r\n\t\tcommands. If you do them out of order, they will not behave correctly. 
In\r\n\t\tthis case, you need to manually set is_on = True/False, or set the overload\r\n\t\targ to true.\r\n\r\n\t\t#To Do\r\n\r\n\t\t\t[x] turnoff function\r\n\t\t\t[x] turnon function\r\n\t\t\t[x] constant\r\n\t\t\t[x] ramp\r\n\r\n\t'''\r\n\r\n\t#channels for each of the possible hardware controls\r\n\t__intensity_channel = None #AOM/EOM\r\n\t__shutter_channel \t= None #Shutter\r\n\t__rf_switch_channel\t= None #RF Switch\r\n\r\n\t__turnoff_voltage \t= None #For the AOM/EOM\r\n\t__shutter_closetime\t= None #Close time from after the TTL is sent\r\n\t__shutter_opentime \t= None\r\n\t#state variable for keeping track of turning the laser on or off\r\n\tis_on = False \r\n\r\n\tdef __init__(self, \r\n\t\tintensity_channel=None, \r\n\t\tshutter_channel=None, \r\n\t\tturnoff_voltage=None,\r\n\t\tshutter_closetime=None,\r\n\t\tshutter_opentime=None,\r\n\t\trf_switch_channel=None,\r\n\t\t):\r\n\r\n\t\tself.__intensity_channel\t= intensity_channel\r\n\t\tself.__shutter_channel \t= shutter_channel\r\n\t\tself.__rf_switch_channel\t= rf_switch_channel\r\n\r\n\t\tself.__turnoff_voltage \t= turnoff_voltage\r\n\t\tself.__shutter_closetime\t= shutter_closetime\r\n\t\tself.__shutter_opentime \t= shutter_opentime\r\n\r\n\t\t#determine shutter close time\r\n\t\tif self.__shutter_closetime is None:\r\n\t\t\tself.__shutter_closetime =\tglobal_shutter_closetime\r\n\r\n\t\tif self.__shutter_opentime is None:\r\n\t\t\tself.__shutter_opentime =\tglobal_shutter_closetime\r\n\r\n\r\n\r\n\tdef turnoff(self, t, warmup_value, overload=False):\r\n\t\t'''\r\n\t\t\tTurns off beam if and only if on unless `overload == True` then always turn off.\r\n\t\t\t`warmup_value` is what value you want for the aom while the shutter is closed.\r\n\r\n\t\t\t#To Do\r\n\t\t\t\t[x]\tSet up AOM/EOM turn on/off\r\n\t\t\t\t[x]\tSet up Shutter turn on/off\r\n\t\t\t\t[x]\tSet up RF Switch turn on/off\r\n\r\n\t\t'''\r\n\r\n\t\tshutter_closetime = self.__shutter_closetime\r\n\t\t#determine aom/eom turnoff voltage\r\n\t\tif self.__turnoff_voltage is None:\r\n\t\t\tturnoff_voltage = 0\r\n\t\telse:\r\n\t\t\tturnoff_voltage = self.__turnoff_voltage\r\n\r\n\r\n\r\n\r\n\r\n\t\tif self.is_on or overload:\r\n\t\t\t#turn off aom/eom\r\n\t\t\tif self.__rf_switch_channel is None:\r\n\t\t\t\tself.__intensity_channel.constant(t,value=turnoff_voltage)\r\n\t\t\telse:\r\n\t\t\t\tself.__rf_switch_channel.disable(t)\r\n\t\t\t\r\n\t\t\t#close shutter iff we have a shutter\r\n\t\t\tif self.__shutter_channel is not None:\r\n\t\t\t\tself.__shutter_channel.disable(t)\r\n\r\n\t\t\t\t#turn on aom/eom iff we have a shutter\r\n\t\t\t\tself.__intensity_channel.constant(t + shutter_closetime,value=warmup_value)\r\n\t\t\t\t\r\n\t\t\t\tif self.__rf_switch_channel is None:\r\n\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.__rf_switch_channel.enable(t + shutter_closetime)\r\n\r\n\t\t\t#change on/off status\r\n\t\t\tself.is_on = False\r\n\t\treturn self.__shutter_closetime + self.__shutter_opentime\r\n\tdef turnon(self, t, *args, overload=False, **kwargs):\r\n\t\t'''\r\n\t\t\tTurns on beam if and only if off. `*args`, `**kwargs` get passed to the turn on value for the laser.\r\n\r\n\t\t\t## Sequence Turn On Details\r\n\r\n\t\t\tWe turn off the beam with the fast control (AOM/EOM) before opening. 
This\r\n\t\tensures that the light profile across the atoms is 1) always uniform and 2)\r\n\t\twell controlled in the time domain.\r\n\r\n\t\t\t#To Do\r\n\t\t\t\t[x]\tSet up AOM/EOM turn on/off\r\n\t\t\t\t[x]\tSet up Shutter turn on/off\r\n\t\t\t\t[x]\tSet up RF Switch turn on/off\r\n\t\t'''\r\n\r\n\r\n\t\t#determine shutter open time\r\n\t\tif self.__shutter_opentime is None:\r\n\t\t\tshutter_opentime =\tglobal_shutter_closetime\r\n\t\telse:\r\n\t\t\tshutter_opentime = self.__shutter_opentime\r\n\t\t#determine aom/eom turnoff voltage\r\n\t\tif self.__turnoff_voltage is None:\r\n\t\t\tturnoff_voltage = 0\r\n\t\telse:\r\n\t\t\tturnoff_voltage = self.__turnoff_voltage\r\n\r\n\r\n\r\n\r\n\t\tif (not self.is_on) or overload:\r\n\t\t\t#if we have a shutter\r\n\t\t\tif self.__shutter_channel is not None:\r\n\t\t\t\t#turn off beam\r\n\t\t\t\tif self.__rf_switch_channel is not None:\r\n\t\t\t\t\t#just turn off the rf switch\r\n\t\t\t\t\tself.__rf_switch_channel.disable(t - shutter_opentime)\r\n\t\t\t\telse:\r\n\t\t\t\t\t#turn off the aom/eom\r\n\t\t\t\t\tself.__intensity_channel.constant(t - shutter_opentime,value=turnoff_voltage)\r\n\r\n\t\t\t\t#open shutter\r\n\t\t\t\tself.__shutter_channel.enable(t - shutter_opentime)\r\n\r\n\t\t\t#turn on beam\r\n\t\t\tif self.__rf_switch_channel is not None:\r\n\t\t\t\t#just turn on the rf switch\r\n\t\t\t\tself.__rf_switch_channel.enable(t)\r\n\t\t\telse:\r\n\t\t\t\t#turn on the aom/eom if we've been told a value.\r\n\t\t\t\tif args or kwargs:\r\n\t\t\t\t\tself.__intensity_channel.constant(t, *args, **kwargs)\r\n\t\t\t\t#else turn on to whatever was last set.\r\n\t\t\tself.is_on = True\r\n\t\treturn 0\r\n\r\n\tdef constant(self, t, *args, overload=False, **kwargs):\r\n\t\t#save args and kwargs, as they get modified after the self.turnon(t) call for some reason\r\n\t\t_args = args\r\n\t\t_kwargs = kwargs\r\n\t\tself.turnon(t, overload=overload)\r\n\t\tself.__intensity_channel.constant(t, *_args, **_kwargs)\r\n\r\n\tdef ramp(self, t, *args, overload=False, **kwargs):\r\n\t\t#save args and kwargs, as they get modified after the self.turnon(t) call for some reason\r\n\t\t_args = args\r\n\t\t_kwargs = kwargs\r\n\t\tself.turnon(t, overload=overload)\r\n\t\treturn self.__intensity_channel.ramp(t, *_args, **_kwargs)\r\n\r\nclass LaserBeam():\r\n\t\"\"\" \r\n\r\n\tThis is a template that holds functions for controlling the laser beam\r\n\tproperties of a *single beampath*: intensity, and frequency. 
This class\r\n\tmostly just holds together, semantically, the two controllers for the\r\n\tintensity and frequency of our laserbeam.\r\n\r\n\t\"\"\"\r\n\r\n\tintensity\t= None\r\n\tfrequency\t= None\r\n\r\n\tdef __init__(self, intensity_control=None, frequency_control=None):\r\n\t\t'''\r\n\t\t\tAccepts arguments of the type `LaserIntensity`, and `LaserFrequency`.\r\n\r\n\t\t\tEach of the objects of these types define all the control methods for\r\n\t\t\t`self.intensity` and `self.frequency`.\r\n\t\t\t\r\n\t\t\tI want to particular about how my classes distinguish between the channel\r\n\t\t\tand the custom functions defined in the `LaserIntensity` and\r\n\t\t\t`LaserFrequency` classes.\r\n\t\t'''\r\n\t\t#save the controller\r\n\t\tself.intensity = intensity_control\r\n\t\tself.frequency = frequency_control\r\n\t\tsuper().__init__()\r\n\r\n\tdef turnoff(self, *args, **kwargs):\r\n\t\t'''\r\n\t\t\tCalls the turn off method in the intensity controller.\r\n\t\t'''\r\n\r\n\t\treturn self.intensity.turnoff(*args,**kwargs)\r\n\r\nclass Laser():\r\n\t'''\r\n\t\tThis keeps track of the various laser beampaths that a single laser can be the source of. This is good for grouping our beampaths symantically.\r\n\t\tThis is all it can do functionally.\r\n\r\n\t\tThe really laser managment must be done in the LaserBeam class.\r\n\r\n\t\tE.g:\r\n\r\n\t\t```python\r\n\t\tgreen \t= Laser()\r\n\t\tgreen.probe\t= LaserBeam()\r\n\t\tgreen.pump \t= LaserBeam() \r\n\t\t```\r\n\t'''\r\n\r\n#Specific Lasers:\r\n\r\nclass BlueLaser(Laser):\r\n\r\n\t#beampath names go here\r\n\tmot = None\r\n\r\n\tdef __init__(self):\r\n\t\t''' \r\n\r\n\t\tDefines the `LaserBeam`s, and `LaserIntensity` and `LaserFrequency`\r\n\t\tcontrols.\r\n\r\n\t\tBeampaths: (mot)\r\n\t\t'''\r\n\r\n\t\t#define the beampaths\r\n\t\tself.mot = LaserBeam(\r\n\t\t\t\tintensity_control\t= LaserIntensity(\r\n\t\t\t\t \t\tintensity_channel\t= blue_mot_power,\r\n\t\t\t\t \t\tshutter_channel \t= blue_mot_shutter\r\n\t\t\t\t \t),\r\n\t\t\t\tfrequency_control\t= None\r\n\t\t\t)\r\n\r\n\r\nclass ProbeSidebandRFSwitch():\r\n\tdef enable(self,t):\r\n\t\tprobe_power_switch.enable(t)\r\n\t\tprobe_sideband_power_switch.enable(t)\r\n\t\tprobe_power_error_modulation.constant(t, value=0*mV)\r\n\t\tprobe_sideband_cooling_rf_switch.enable(t)\r\n\tdef disable(self,t ):\r\n\t\tprobe_power_switch.disable(t)\r\n\t\tprobe_sideband_power_switch.disable(t)\r\n\t\tprobe_power_error_modulation.constant(t, value=2.95)\r\n\t\t# probe_power_error_modulation.constant(t, value=5)\r\n\t\tprobe_sideband_cooling_rf_switch.disable(t)\r\n\r\nclass CoolingSigmaRFSwitch():\r\n\tdef enable(self,t):\r\n\t\tprobe_power_switch.enable(t)\r\n\t\tprobe_sideband_cooling_rf_switch.disable(t)\r\n\t\tprobe_power_error_modulation.constant(t, value=0*mV)\r\n\tdef disable(self,t):\r\n\t\tprobe_power_switch.disable(t)\r\n\t\tprobe_power_error_modulation.constant(t, value=2.95)\r\n\t\tprobe_sideband_cooling_rf_switch.enable(t)\r\nclass GreenLaser(Laser):\r\n\r\n\t#beampath names go here\r\n\tprobe \t= None \r\n\tpump \t= None #needs P7888 monitor\r\n\tmot \t= None\r\n\tcooling_pi \t= None\r\n\tcooling_sigma\t= None\r\n\r\n\tdef __init__(self):\r\n\t\tms = 1e-3\r\n\t\t# a custom definition to combine two rf switches\r\n\t\tbuiltins.combined_probe_sideband_power_switch = ProbeSidebandRFSwitch()\r\n\t\tbuiltins.combined_probe_cooling_sigma_power_switch = CoolingSigmaRFSwitch()\r\n\t\t#define the beampaths\r\n\t\tself.mot = LaserBeam(\r\n\t\t\t\tintensity_control = 
LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel = green_mot_power,\r\n\t\t\t\t\t\trf_switch_channel = green_mot_power_switch,\r\n\t\t\t\t\t\tshutter_channel = green_mot_shutter,\r\n\t\t\t\t\t\tshutter_opentime = 4.9*ms,\r\n\t\t\t\t\t\tshutter_closetime = 6.2*ms\r\n\t\t\t\t\t),\r\n\t\t\t\tfrequency_control = None,\r\n\t\t\t)\r\n\r\n\t\tself.pump = LaserBeam(\r\n\t\t\t\tintensity_control = LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel = pump_power,\r\n\t\t\t\t\t\tshutter_channel = pump_power_switch,\r\n\t\t\t\t\t\t# shutter_closetime = 1*ms\r\n\t\t\t\t\t),\r\n\t\t\t\tfrequency_control = None,\r\n\t\t\t)\r\n\r\n\t\tms = 1e-3\r\n\t\tself.probe_shutter = probe_shutter\r\n\t\tself.cooling_shutter = cooling_sigma_shutter\r\n\t\tself.probe = LaserBeam(\r\n\t\t\t\tintensity_control = LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel\t= probe_sideband_power,\r\n\t\t\t\t\t\trf_switch_channel\t= combined_probe_sideband_power_switch,\r\n\t\t\t\t\t\tshutter_channel \t= probe_sideband_shutter,\r\n\t\t\t\t\t\tshutter_closetime\t= 5.2*ms,\r\n\t\t\t\t\t\tshutter_opentime \t= 5.6*ms\r\n\t\t\t\t\t),\r\n\t\t\t\tfrequency_control = probe_sideband_frequency,\r\n\t\t\t)\r\n\r\n\t\tself.cooling_sigma = LaserBeam(\t\t\t\t\r\n\t\t intensity_control = LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel\t= cooling_sigma_plus_power,\r\n\t\t\t\t\t\trf_switch_channel\t= combined_probe_cooling_sigma_power_switch,\r\n\t\t\t\t\t\tshutter_channel \t= cooling_sigma_shutter,\r\n\t\t\t\t\t\tshutter_closetime\t= 6.28*ms,\r\n\t\t\t\t\t\tshutter_opentime \t= 3.36*ms\r\n\t\t\t\t\t),\r\n\t\t\t\tfrequency_control = cooling_sideband_frequency,#SRS FM input\r\n\t\t\t)\r\n\r\n\r\n\t\tself.cooling_pi = LaserBeam(\r\n\t\t\t\tintensity_control = LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel = cooling_pi_power,\r\n\t\t\t\t\t\trf_switch_channel = cooling_pi_power_switch,\r\n\t\t\t\t\t\tshutter_channel = cooling_pi_shutter\r\n\t\t\t\t\t),\r\n\t\t\t\tfrequency_control = None,\r\n\t\t\t)\r\n\r\nclass RedLaser(Laser):\r\n\r\n\t#beampath names go here\r\n\tcavity \t= None\r\n\ttransverse\t= None\r\n\r\n\tdef __init__(self):\r\n\t\tself.cavity = LaserBeam(\r\n\t\t\t\tintensity_control = LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel = red_cavity_power,\r\n\t\t\t\t\t\trf_switch_channel = red_cavity_power_switch\r\n\t\t\t\t\t)\r\n\t\t\t)\r\n\r\n\t\tself.transverse = LaserBeam(\r\n\t\t\t\tintensity_control = LaserIntensity(\r\n\t\t\t\t\t\tintensity_channel = red_transverse_power\r\n\t\t\t\t\t)\r\n\t\t\t)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tpass","repo_name":"collegefishies/sequences-and-connection-tables","sub_path":"classes/laser_beams.py","file_name":"laser_beams.py","file_ext":"py","file_size_in_byte":11728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22701741600","text":"\"\"\"Evaluation for ip5wke.\n\nAccuracy:\nip5wke_train.py achieves 94% accuracy after 100K steps (256 epochs\nof data) as judged by ip5wke_eval.py.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport math\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport ip5wke\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('eval_dir', '/tmp/ip5wke_eval',\n \"\"\"Directory where to write event logs.\"\"\")\ntf.app.flags.DEFINE_string('eval_data', 'validation',\n \"\"\"Either 'test' or 'validation'.\"\"\")\ntf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/ip5wke_train',\n 
\"\"\"Directory where to read model checkpoints.\"\"\")\ntf.app.flags.DEFINE_integer('eval_interval_secs', 600,\n \"\"\"How often to run the eval.\"\"\")\ntf.app.flags.DEFINE_integer('num_examples', 8000,\n \"\"\"Number of examples to run.\"\"\")\ntf.app.flags.DEFINE_boolean('run_once', False,\n \"\"\"Whether to run eval only once.\"\"\")\n\ntf.app.flags.DEFINE_float('dropout_keep_probability', 1.0,\n \"How many nodes to keep during dropout\")\ntf.app.flags.DEFINE_integer('batch_size', 32,\n \"\"\"Number of images to process in a batch.\"\"\")\ntf.app.flags.DEFINE_integer('is_training', False,\n \"\"\"Is training or not for batch norm\"\"\")\n\n\ndef eval_once(saver, summary_writer, top_k_op, top_k_op2, conf_matrix_op,\n num_classes, summary_op):\n \"\"\"Run Eval once.\n\n Args:\n saver: Saver.\n summary_writer: Summary writer.\n top_k_op: Top K op.\n summary_op: Summary op.\n \"\"\"\n config = tf.ConfigProto(\n device_count={'GPU': 0}\n )\n with tf.Session(config=config) as sess:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/ip5wke_train/model.ckpt-0,\n # extract global_step from it.\n global_step = ckpt.model_checkpoint_path.split('/')[-1] \\\n .split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n try:\n threads = []\n for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n threads.extend(qr.create_threads(sess, coord=coord, daemon=True,\n start=True))\n\n num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))\n \n # calculate accuracy, precision, recall and f1 score\n true_count = 0 # Counts the number of correct predictions.\n true_count2 = 0\n total_sample_count = num_iter * FLAGS.batch_size\n step = 0\n precisions = np.zeros(shape=(num_classes))\n recalls = np.zeros(shape=(num_classes))\n tp = np.zeros(shape=(num_classes))\n\n while step < num_iter and not coord.should_stop():\n predictions, predictions2, conf_matrix = sess.run(\n [top_k_op, top_k_op2, conf_matrix_op])\n true_count += np.sum(predictions)\n true_count2 += np.sum(predictions2)\n precisions += conf_matrix.sum(axis=0)\n recalls += conf_matrix.sum(axis=1)\n tp += np.diagonal(conf_matrix)\n\n step += 1\n\n # Compute precision @ 1.\n precision = true_count / total_sample_count\n precision2 = true_count2 / total_sample_count\n print('%s: precision @ 1 = %.3f, @ 3 = %.3f' % (datetime.now(),\n precision,\n precision2))\n\n precs = np.divide(tp, precisions)\n recs = np.divide(tp, recalls)\n f1_scores = np.multiply(2.0, np.divide(np.multiply(precs, recs),\n np.add(precs, recs)))\n\n print('precisions: ' + str(precs))\n print('recalls: ' + str(recs))\n print('f1: ' + str(f1_scores))\n\n summary = tf.Summary()\n summary.ParseFromString(sess.run(summary_op))\n summary.value.add(tag='Precision @ 1', simple_value=precision)\n summary_writer.add_summary(summary, global_step)\n except Exception as e: # pylint: disable=broad-except\n coord.request_stop(e)\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=10)\n\n\ndef evaluate():\n \"\"\"Eval ip5wke for a number of steps.\"\"\"\n with tf.Graph().as_default() as g:\n # Get images and labels for ip5wke.\n eval_data = FLAGS.eval_data == 'test'\n images, labels = ip5wke.inputs(eval_data=eval_data)\n\n # Build a Graph that computes the logits predictions from the\n # 
inference model.\n        logits = ip5wke.inference(images)\n\n        # Calculate predictions.\n        top_k_op = tf.nn.in_top_k(logits, labels, 1)\n        top_k_op2 = tf.nn.in_top_k(logits, labels, 3)\n        conf_matrix_op = tf.contrib.metrics.confusion_matrix(\n            tf.argmax(logits, 1), labels,\n            num_classes=ip5wke.NUM_CLASSES)\n\n        # Restore the moving average version of the learned variables for eval.\n        variable_averages = tf.train.ExponentialMovingAverage(\n            ip5wke.MOVING_AVERAGE_DECAY)\n        variables_to_restore = variable_averages.variables_to_restore()\n        saver = tf.train.Saver(variables_to_restore,\n                               write_version=tf.train.SaverDef.V2)\n\n        # Build the summary operation based on the TF collection of Summaries.\n        summary_op = tf.summary.merge_all()\n\n        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)\n\n        while True:\n            eval_once(saver, summary_writer, top_k_op, top_k_op2,\n                      conf_matrix_op, ip5wke.NUM_CLASSES, summary_op)\n            if FLAGS.run_once:\n                break\n            time.sleep(FLAGS.eval_interval_secs)\n\n\ndef main(argv=None):  # pylint: disable=unused-argument\n    # ip5wke.maybe_download_and_extract()\n    if tf.gfile.Exists(FLAGS.eval_dir):\n        tf.gfile.DeleteRecursively(FLAGS.eval_dir)\n    tf.gfile.MakeDirs(FLAGS.eval_dir)\n    evaluate()\n\n\nif __name__ == '__main__':\n    tf.app.run()\n","repo_name":"Panaetius/IP5","sub_path":"src/models/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":6690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40398277825","text":"from core.factories import settings\nfrom ssl import create_default_context\nfrom gino.ext.starlette import Gino\n\n\nif not settings.DEBUG:\n    ssl_object = create_default_context(cafile=settings.SSL_CERT_FILE)\n\n    db: Gino = Gino(\n        dsn=settings.DATABASE_URL,\n        echo=False,\n        ssl=ssl_object,\n        pool_min_size=3,\n        pool_max_size=20,\n        retry_limit=1,\n        retry_interval=1,\n    )\nelse:\n\n    db: Gino = Gino(\n        dsn=settings.DATABASE_URL)\n","repo_name":"Turall/FastApi-boilerplate","sub_path":"core/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"21"}
{"seq_id":"20123217981","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\nimport csv\r\nimport nltk\r\nimport pandas as pd\r\nfrom nltk.corpus import stopwords \r\nfrom nltk.tokenize import word_tokenize \r\nfrom nltk import PorterStemmer\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom IntegratedCleaningAndStop import stem ,stopword ,nonASCII, remove_alphanumeric\r\n\r\ndef function(s):\r\n    final_str=nonASCII(s)\r\n    final_str=remove_alphanumeric(final_str)\r\n    final_str=stopword(final_str)\r\n    final_str=stem(final_str)\r\n    final_str = final_str.lower()\r\n    return final_str\r\nto_remove=[\"yandex\",\"yahoo\",\"ask\",\"duckduckgo\",\"aol\",\"bing\",\"terms of use\",\"privacy policy\",\"curlie\",\r\n           \"facebook\",\"twitter\",\"linkedin\",\"google\",\"ecosia\",\"gigablast\",\"startpage\",\"about\",\"become an editor\",\r\n           \"suggest a site\",\"help\",\"forums\",\"login\",\"donate\"]\r\n\r\nlist_URLs = [\"\"]\r\nfinal_list =[]\r\ncount=0\r\nwhile count<2000 and len(list_URLs)!=0:\r\n    temp_list=[]\r\n    URL = list_URLs.pop(0)\r\n    count=count+1\r\n    r = requests.get(URL) \r\n    soup = BeautifulSoup(r.content, 'html.parser')\r\n    temp_list.append(URL)\r\n    \r\n    \r\n    \r\n    title= \" \"\r\n    if soup.title is not None:\r\n        title = soup.title.string\r\n    
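# record the title after cleaning it (non-ASCII/stopword removal and stemming via function())\r\n    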
temp_list.append(function(title))\r\n \r\n meta_desc = \" \"\r\n keywords = \" \"\r\n for meta in soup.find_all('meta'):\r\n if meta.get('name') == 'description' and meta.get('content') is not None :\r\n meta_desc = meta_desc + meta.get('content')\r\n \r\n if meta.get('name') == 'keywords' and meta.get('content') is not None:\r\n keywords = keywords + meta.get('content')\r\n temp_list.append(function(meta_desc))\r\n temp_list.append(function(keywords))\r\n \r\n h1_str = \" \"\r\n for h1 in soup.find_all('h1'):\r\n if h1.string is not None:\r\n h1_str = h1_str + h1.string +\" \"\r\n temp_list.append(function(h1_str)) \r\n \r\n h2_str = \" \"\r\n for h2 in soup.find_all('h2'):\r\n if h2.string is not None:\r\n h2_str = h2_str + h2.string +\" \"\r\n temp_list.append(function(h2_str)) \r\n \r\n h3_str = \" \"\r\n for h3 in soup.find_all('h3'):\r\n if h3.string is not None:\r\n h3_str = h3_str + h3.string +\" \"\r\n temp_list.append(function(h3_str))\r\n \r\n \r\n a_str = \" \"\r\n for link in soup.find_all('a'):\r\n if link.string is not None and link.string.lower() not in to_remove :\r\n a_str = a_str + link.string +\" \"\r\n if link.get('href') is not None and 'http' in link.get('href') and link.get('href') not in list_URLs and link.string is not None and link.string.lower() not in to_remove:\r\n list_URLs.append(link.get('href'))\r\n temp_list.append(function(a_str)) \r\n \r\n p_str = \" \"\r\n for ptags in soup.find_all('p'):\r\n if ptags.string is not None:\r\n p_str = p_str + ptags.string +\" \"\r\n temp_list.append(function(p_str))\r\n \r\n s_str = \" \"\r\n for stags in soup.find_all('strong'):\r\n if stags.string is not None:\r\n s_str = s_str + stags.string +\" \"\r\n temp_list.append(function(s_str))\r\n\r\n b_str = \" \"\r\n for btags in soup.find_all('b'):\r\n if btags.string is not None:\r\n b_str = b_str + btags.string +\" \"\r\n temp_list.append(function(b_str)) \r\n\r\n list_str = \" \"\r\n for list_tags in soup.find_all('li'):\r\n if list_tags.string is not None:\r\n list_str = list_str + list_tags.string +\" \"\r\n temp_list.append(function(list_str))\r\n \r\n i_str = \" \"\r\n for itags in soup.find_all('i'):\r\n if itags.string is not None:\r\n i_str = i_str + itags.string +\" \"\r\n temp_list.append(function(i_str))\r\n \r\n em_str = \" \"\r\n for emtags in soup.find_all('em'):\r\n if emtags.string is not None:\r\n em_str = em_str + emtags.string +\" \"\r\n temp_list.append(function(em_str))\r\n \r\n with open(\"Final_sheet.csv\", \"a\") as fp:\r\n wr = csv.writer(fp, dialect='excel')\r\n wr.writerow(temp_list)\r\n ##final_list.append(temp_list)\r\n\r\n##df = pd.DataFrame(final_list, columns =['URL', 'Title','MetaDescription', 'Keywords','H1', 'H2','H3', 'AnchorText','Paragraph', 'Strong','Bold', 'List_str','Italic', 'Emphasis'])\r\n##df.to_csv('Final_sheet.csv')\r\n\r\n\r\n\r\n","repo_name":"Shukla1101/Breadth-First-Web-Crawler.","sub_path":"Tag_extraction+preprocessing.py","file_name":"Tag_extraction+preprocessing.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22991887608","text":"import logging\nimport multiprocessing\nimport os\nimport select\nimport signal\nimport subprocess\nimport sys\nimport threading\nimport time\n\n\nfrom django_spring.config import Config\nfrom django_spring.utils.logger import get_logger\nfrom django_spring.utils.processes import pid_is_alive\nfrom django_spring.utils.socket_data import (\n bind,\n closing,\n connect,\n read_json,\n 
fd_redirect_list,\n write_json,\n)\n\n\nclass ClientToAppControlThread(threading.Thread):\n def __init__(self, app_servers, client_sock):\n threading.Thread.__init__(self)\n self.app_servers, self.client_sock = app_servers, client_sock\n\n def send_msg(self, msg):\n app_sock = connect(\n self.app_servers[msg[\"app_env\"]], wait_time=1, max_attempts=5\n )\n with closing(app_sock):\n write_json(msg, app_sock)\n\n def run(self):\n log = get_logger(\"[CLIENT_CTL_THREAD]\")\n log(\"START\")\n\n try:\n with closing(self.client_sock):\n ins, _, _ = select.select([self.client_sock], [], [])\n if ins:\n try:\n msg = read_json(ins[0])\n except ValueError:\n # could be empty string if client disconnects\n return\n self.send_msg(msg)\n finally:\n log(\"DONE\")\n\n\nclass ClientToAppDataThread(threading.Thread):\n def __init__(self, app_servers, client_sock):\n threading.Thread.__init__(self)\n self.app_servers, self.client_sock = app_servers, client_sock\n\n def run(self):\n log = get_logger(\"[CLIENT_DATA_THREAD]\")\n log(\"START\")\n\n try:\n with closing(self.client_sock):\n ins, _, _ = select.select([self.client_sock], [], [])\n if ins:\n msg = read_json(ins[0])\n app_env = msg[\"app_env\"]\n app_sock = connect(\n self.app_servers[app_env], wait_time=3, max_attempts=10\n )\n with closing(app_sock):\n write_json(msg, app_sock)\n redirect_map = {\n app_sock: self.client_sock,\n self.client_sock: app_sock,\n }\n\n while True:\n ins, _, _ = select.select(redirect_map.keys(), [], [])\n if not fd_redirect_list(ins, redirect_map):\n break\n finally:\n log(\"DONE\")\n\n\nclass Manager(object):\n def __init__(self, path_server, path_ctl):\n self.app_servers = multiprocessing.Manager().dict()\n self.path_server = path_server\n self.path_ctl = path_ctl\n self.log = get_logger(\"[MANAGER]\")\n\n def _start_app_server(self, app_server_id):\n path = Config.APP_SOCK_FILE.format(app_server_id)\n multiprocessing.Process(\n target=self._watch, args=(app_server_id, path), daemon=True\n ).start()\n\n def _watch(self, app_server_id, sock_file_path):\n def _exit_child(process, exit_signal):\n if not process:\n return\n try:\n self.log(\"killing subprocess %s\" % exit_signal)\n os.kill(process.pid, exit_signal)\n except OSError:\n pass\n while pid_is_alive(process.pid):\n time.sleep(1)\n\n def _poll_child(process):\n exit_code = process.poll()\n if exit_code is None:\n time.sleep(1)\n return process, False\n elif exit_code != Config.RESTART_EXIT_CODE:\n self.log(\"exit_code other than restart code: %s\" % exit_code)\n return None, True\n else:\n return None, False\n\n def _spawn_child():\n self.log(\"starting subprocess\")\n app_server_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"app_server.py\"\n )\n args = (\n [sys.executable]\n + [\"-W%s\" % o for o in sys.warnoptions]\n + [app_server_path]\n + [sock_file_path]\n + [app_server_id]\n )\n new_environ = os.environ.copy()\n return subprocess.Popen(args, env=new_environ)\n\n p = None\n try:\n while True:\n if p:\n p, err = _poll_child(p)\n if err:\n del self.app_servers[app_server_id]\n return\n else:\n p = _spawn_child()\n self.app_servers[app_server_id] = sock_file_path\n except KeyboardInterrupt:\n pass\n finally:\n _exit_child(p, signal.SIGTERM)\n\n def run(self):\n try:\n with bind(self.path_server) as manager_sock, bind(\n self.path_ctl\n ) as manager_ctl:\n self._start_app_server(\"test\")\n self._start_app_server(\"dev\")\n manager_sock.listen(1)\n manager_ctl.listen(1)\n self.log(\"START LOOP\", logging.WARN)\n\n while True:\n ins, _, _ = 
select.select([manager_sock], [], [], 1)\n if ins:\n client_sock, _ = ins[0].accept()\n ClientToAppDataThread(\n app_servers=self.app_servers, client_sock=client_sock\n ).start()\n ins, _, _ = select.select([manager_ctl], [], [], 1)\n if ins:\n client_sock, _ = ins[0].accept()\n ClientToAppControlThread(\n app_servers=self.app_servers, client_sock=client_sock\n ).start()\n except KeyboardInterrupt:\n pass\n finally:\n self.log(\"STOP LOOP\", logging.WARN)\n\n\ndef start_manager():\n Manager(Config.MANAGER_SOCK_FILE, Config.MANAGER_CTL_SOCK_FILE).run()\n\n\nif __name__ == \"__main__\":\n start_manager()\n","repo_name":"lime-green/django-spring","sub_path":"django_spring/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"43526260932","text":"# Importing header files\nimport numpy as np\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\n#New record\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\n\n#Reading file\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\nprint(data.shape)\n#Code starts here\ncensus = np.concatenate((data, new_record), axis = 0)\nprint(census.shape)\nage = np.array(census[: , 0])\nmax_age = np.max(age)\nmin_age = np.min(age)\nage_mean = age.mean()\nage_std = np.std(age)\nprint(max_age , min_age , age_mean , age_std)\n\n\n_0 = census[: , 2] == 0\n_1 = census[: , 2] == 1\n_2 = census[: , 2] == 2\n_3 = census[: , 2] == 3\n_4 = census[: , 2] == 4\n\nrace_0 = census[_0]\nrace_1 = census[_1]\nrace_2 = census[_2]\nrace_3 = census[_3]\nrace_4 = census[_4]\n\nlen_0 = len(race_0)\nlen_1 = len(race_1)\nlen_2 = len(race_2)\nlen_3 = len(race_3)\nlen_4 = len(race_4)\nf = 0\n_race = np.array([len_0, len_1, len_2, len_3, len_4])\n_min = np.min(_race)\nfor i in _race:\n if _min == i:\n minority_race = f\n else:\n f += 1\nprint(minority_race)\n\n_citizens = census[: , 0] > 60\nsenior_citizens = census[_citizens]\nworking_hours_sum = senior_citizens[: , 6].sum()\nprint(working_hours_sum)\nsenior_citizens_len = len(senior_citizens)\navg_working_hours = working_hours_sum/senior_citizens_len\nprint('%.2f' % avg_working_hours)\n\n_high = census[: , 1] > 10\nhigh = census[_high]\n\n_low = census[: , 1] <= 10\nlow = census[_low]\n\navg_pay_high = high[: , 7].mean()\navg_pay_low = low[: , 7].mean()\nprint('%.2f' % avg_pay_high , '%.2f' % avg_pay_low)\n","repo_name":"ankitasuman009/greyatom-python-for-data-science","sub_path":"numpy-array/census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7148877233","text":"from tests import fixtures\nfrom models import TTask, TUser\nfrom database import db_session\n\n\ndef populate_db():\n for user in fixtures.users:\n new_user = TUser(user_id=user[\"user_id\"],\n first_name=user[\"first_name\"],\n last_name=user[\"last_name\"])\n db_session.add(new_user)\n db_session.commit()\n\n # TUser.add_record(first_name=user[\"first_name\"],\n # last_name=user[\"last_name\"])\n for task in fixtures.tasks:\n new_user = TTask(user_id=task[\"user_id\"],\n task_id=task[\"task_id\"],\n name=task[\"name\"],\n desc=task[\"desc\"])\n db_session.add(new_user)\n db_session.commit()\n\n # TTask.add_record(task[\"name\"], task[\"desc\"], 
task[\"user_id\"])","repo_name":"polosate/simplepython","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26028817082","text":"import discord\nfrom discord.ext import commands, tasks\n\n\ndef has_custom_commands_role():\n def pred(ctx):\n if discord.utils.get(ctx.author.roles, name=\"Highlighter + News\"):\n return True\n elif discord.utils.get(ctx.author.roles, name=\"Discord Admin\"):\n return True\n else:\n return False\n\n return commands.check(pred)\n\n\nclass ImageCommand:\n def __init__(self, command, file):\n self.command = command\n self.file = file\n\n\nclass Images(commands.Cog, name=\"Images\"):\n def __init__(self, bot):\n self.bot = bot\n self.image_collection = bot.image_database['images']\n self.get_all_images.start()\n\n @tasks.loop(seconds=1, count=1)\n async def get_all_images(self):\n self.bot.image_commands.clear()\n collection = await self.image_collection.find({}).to_list(length=None)\n for document in collection:\n x = ImageCommand(document[\"_id\"], document['file'])\n self.bot.image_commands.append(x)\n\n @commands.group(brief=\"Image Group Commands.\")\n async def image(self, ctx):\n if not ctx.invoked_subcommand:\n await ctx.send(\"This is a group command, use `howler help image` to get list of subcommands under this command.\")\n return\n\n @has_custom_commands_role()\n @image.command(brief=\"Adds a custom image command.\")\n async def add(self, ctx, command):\n if await self.image_collection.count_documents({\"_id\": f\"howler {command}\"}, limit=1) != 0:\n return await ctx.send(\"This command name is already registered.\")\n\n if ctx.message.attachments[0].content_type == \"image/gif\":\n file_name = f\"howler{command}.gif\"\n else:\n file_name = f\"howler{command}.png\"\n\n command_name = f\"howler {command}\"\n image_dict = {\"_id\": command_name, \"file\": file_name}\n await self.image_collection.insert_one(image_dict)\n await ctx.message.attachments[0].save(f\"images/{file_name}\")\n x = ImageCommand(command_name, file_name)\n self.bot.image_commands.append(x)\n\n @has_custom_commands_role()\n @image.command(brief=\"Removes a custom image command.\")\n async def remove(self, ctx, command):\n await self.image_collection.delete_many({\"_id\": f\"howler {command}\"})\n self.get_all_images.start()\n\n @image.command(brief=\"Lists all custom image commands.\")\n async def list(self, ctx):\n list_string = \"```\"\n for command in self.bot.image_commands:\n list_string += f\"{command.command}\\n\"\n\n list_string += \"```\"\n\n embed = discord.Embed(\n title=\"Image Commands\",\n description=list_string\n )\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(Images(bot))\n","repo_name":"OwenTiedemann/HowlerV2","sub_path":"cogs/imagecommands.py","file_name":"imagecommands.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10305839541","text":"set1Length= int(input())\nelementsSet1= set(map(int, input().split()))\nset2Length= int(input())\nelementsSet2= set(map(int, input().split()))\nuniqueList1= list(elementsSet1.difference(elementsSet2))\nuniqueList2= list(elementsSet2.difference(elementsSet1))\ncombinedList= [item for item in uniqueList1]\nfor item2 in uniqueList2:\n combinedList.append(item2)\n\ncombinedList.sort()\nfor item in combinedList:\n 
print(item)\n","repo_name":"mananaggarwal2001/Programs","sub_path":"Hackerrank/HackerrankQ30(sets problem).py","file_name":"HackerrankQ30(sets problem).py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21222018251","text":"import cupy\nfrom cupy.core import core\n\n\ndef isdense(x):\n return isinstance(x, core.ndarray)\n\n\ndef isintlike(x):\n try:\n return bool(int(x) == x)\n except (TypeError, ValueError):\n return False\n\n\ndef isscalarlike(x):\n return cupy.isscalar(x) or (isdense(x) and x.ndim == 0)\n\n\ndef isshape(x):\n if not isinstance(x, tuple) or len(x) != 2:\n return False\n m, n = x\n return isintlike(m) and isintlike(n)\n\n\ndef validateaxis(axis):\n if axis is not None:\n axis_type = type(axis)\n\n if axis_type == tuple:\n raise TypeError(\n 'Tuples are not accepted for the \\'axis\\' '\n 'parameter. Please pass in one of the '\n 'following: {-2, -1, 0, 1, None}.')\n\n if not cupy.issubdtype(cupy.dtype(axis_type), cupy.integer):\n raise TypeError('axis must be an integer, not {name}'\n .format(name=axis_type.__name__))\n\n if not (-2 <= axis <= 1):\n raise ValueError('axis out of range')\n","repo_name":"yuhc/ava-cupy","sub_path":"cupyx/scipy/sparse/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74797626933","text":"import argparse\nfrom torchvision.utils import save_image\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nfrom models_LAM import *\nfrom datasets_evaluation import *\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data_path\", type=str, default=\"PPR10K_dataset\", help=\"root of the datasets\")\nparser.add_argument(\"--gpu_id\", type=str, default=\"0\", help=\"gpu id\")\nparser.add_argument(\"--epoch\", type=int, default=0, help=\"epoch to load\")\nparser.add_argument(\"--model_dir\", type=str, default=\"Checkpoints_LAM_a\", help=\"path to save model\")\nparser.add_argument(\"--lut_dim\", type=int, default=33, help=\"dimension of lut\")\nopt = parser.parse_args()\n\nos.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id\ncuda = True if torch.cuda.is_available() else False\ncriterion_pixelwise = torch.nn.MSELoss()\n\nMaskContext = MaskContext()\nContext = ContextModulate()\nLUT1 = Generator3DLUT_identity(dim=opt.lut_dim)\nLUT2 = Generator3DLUT_identity(dim=opt.lut_dim)\nLUT3 = Generator3DLUT_identity(dim=opt.lut_dim)\nLUT4 = Generator3DLUT_identity(dim=opt.lut_dim)\nLUT5 = Generator3DLUT_identity(dim=opt.lut_dim)\nclassifier = resnet18_224(out_dim=5)\ntrilinear_ = TrilinearInterpolation()\n\nif cuda:\n Context = Context.cuda()\n LUT1 = LUT1.cuda()\n LUT2 = LUT2.cuda()\n LUT3 = LUT3.cuda()\n LUT4 = LUT4.cuda()\n LUT5 = LUT5.cuda()\n classifier = classifier.cuda()\n criterion_pixelwise.cuda()\n MaskContext = MaskContext.cuda()\n\nLUTs = torch.load(\"saved_models/%s/LUTs_%d.pth\" % (opt.model_dir, opt.epoch))\nLUT1.load_state_dict(LUTs[\"1\"])\nLUT2.load_state_dict(LUTs[\"2\"])\nLUT3.load_state_dict(LUTs[\"3\"])\nLUT4.load_state_dict(LUTs[\"4\"])\nLUT5.load_state_dict(LUTs[\"5\"])\nContext.load_state_dict(\n torch.load(\"saved_models/%s/Context_%d.pth\" % (opt.model_dir, opt.epoch)))\nclassifier.load_state_dict(\n torch.load(\"saved_models/%s/classifier_%d.pth\" % (opt.model_dir, opt.epoch)))\nMaskContext.load_state_dict(\n torch.load(\"saved_models/%s/MaskContext_%d.pth\" % 
(opt.model_dir, opt.epoch)))\n\nContext.eval()\nclassifier.eval()\nMaskContext.eval()\n\n# upsample = nn.Upsample(size=(360, 540), mode='bilinear')\n\ndataloader = DataLoader(\n ImageDataset_paper(opt.data_path),\n batch_size=1,\n shuffle=False,\n num_workers=1,\n)\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n\ndef generator(img):\n imgs = Img_X_Mask(img, MaskContext(img))\n Mask = Context(imgs).squeeze()\n pred = classifier(img).squeeze()\n gen_A1 = LUT1(img)\n gen_A2 = LUT2(img)\n gen_A3 = LUT3(img)\n gen_A4 = LUT4(img)\n gen_A5 = LUT5(img)\n\n combine_A = img.new(img.size())\n combine_A[0, :, :, :] = (\n torch.mul(gen_A1, Mask[0]) * pred[0] + torch.mul(gen_A2, Mask[1]) * pred[1] +\n torch.mul(gen_A3, Mask[2]) * pred[2] + torch.mul(gen_A4, Mask[3]) * pred[3] +\n torch.mul(gen_A5, Mask[4]) * pred[4])\n\n return combine_A\n\n\ndef visualize_result():\n \"\"\"Saves a generated sample from the validation set\"\"\"\n out_dir = \"results/%s_%d\" % (opt.model_dir, opt.epoch)\n os.makedirs(out_dir, exist_ok=True)\n sum_time = 0\n img_number = 0\n for i, batch in enumerate(dataloader):\n input_A = Variable(batch[\"A_input\"].type(Tensor))\n # input_A = upsample(input_A)\n img_name = batch[\"input_name\"]\n # print(input_A.shape)\n start_time = time.time()\n\n result_B = generator(input_A)\n\n end_time = time.time()\n time_difference = end_time - start_time\n sum_time += time_difference\n img_number += 1\n print(img_number, sum_time)\n save_image(result_B, os.path.join(out_dir, \"%s.png\" % (img_name[0][:-4])), nrow=1, normalize=False)\n\n ave_time = sum_time / img_number\n print(ave_time)\n\n\nwith torch.no_grad():\n visualize_result()\n","repo_name":"CodeMonsterPHD/PWA","sub_path":"code/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33988405948","text":"\"\"\"– Uma vez que temos uma lista contendo como elementos próprios uma série de tuplas compostas por uma categoria e por um tipo de exame de imagem, reconstrua essa base de dados em um dicionário, unificando as categorias de exames e agrupando seus subtipos.\n\"\"\"\n\ndef transforma_tupla_em_dict(lista_de_tupla):\n data_dict = {}\n\n for especialidade, exame in data:\n data_dict.setdefault(especialidade, []).append(exame)\n\n return data_dict\n\nif __name__ == '__main__':\n data = [\n ('Raios-X', 'Raios-X'),\n ('Magnetismo', 'Ressonância magnetica'),\n ('Ultrassom', 'Cintelografia'),\n ('Raios-X', 'Tomografia computadorizada'),\n ('Medicina nuclear', 'PET-CT'),\n ('Raios-X', 'Mamografia'),\n ('Raios-X', 'Densitometria Ossea'),\n ]\n\n print(transforma_tupla_em_dict(data))\n","repo_name":"carlosrjhoe/Python","sub_path":"Livro_Python_na_Prática_2/elementos_tuplas.py","file_name":"elementos_tuplas.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9698698470","text":"import os\nimport inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0, parentdir)\n\nimport pybullet as p\nimport math\nimport time\nimport pybullet_data\n\np.connect(p.GUI)\n#p.loadURDF(\"wheel.urdf\",[0,0,3])\np.setAdditionalSearchPath(pybullet_data.getDataPath())\nplane = p.loadURDF(\"plane100.urdf\", [0, 0, 0])\ntimestep = 1. 
/ 240.\n\nbike = -1\nfor i in range(1):\n\n bike = p.loadURDF(\"bicycle/bike.urdf\", [0, 0 + 3 * i, 1.5], [0, 0, 0, 1], useFixedBase=False)\n p.setJointMotorControl2(bike, 0, p.VELOCITY_CONTROL, targetVelocity=0, force=0.05)\n #p.setJointMotorControl2(bike,1,p.VELOCITY_CONTROL,targetVelocity=5, force=1000)\n p.setJointMotorControl2(bike, 1, p.VELOCITY_CONTROL, targetVelocity=5, force=0)\n p.setJointMotorControl2(bike, 2, p.VELOCITY_CONTROL, targetVelocity=15, force=20)\n\n p.changeDynamics(plane, -1, mass=0, lateralFriction=1, linearDamping=0, angularDamping=0)\n p.changeDynamics(bike, 1, lateralFriction=1, linearDamping=0, angularDamping=0)\n p.changeDynamics(bike, 2, lateralFriction=1, linearDamping=0, angularDamping=0)\n #p.resetJointState(bike,1,0,100)\n #p.resetJointState(bike,2,0,100)\n #p.resetBaseVelocity(bike,[0,0,0],[0,0,0])\n#p.setPhysicsEngineParameter(numSubSteps=0)\n#bike=p.loadURDF(\"frame.urdf\",useFixedBase=True)\n#bike = p.loadURDF(\"handlebar.urdf\", useFixedBase=True)\n#p.loadURDF(\"handlebar.urdf\",[0,2,1])\n#coord\t=\tp.loadURDF(\"handlebar.urdf\", [0.7700000000000005,\t0, 0.22000000000000006],useFixedBase=True)#\tp.loadURDF(\"coordinateframe.urdf\",[-2,0,1],useFixedBase=True)\n#coord\t=\tp.loadURDF(\"coordinateframe.urdf\",[-2,0,1],useFixedBase=True)\np.setGravity(0, 0, -10)\np.setRealTimeSimulation(0)\n#coordPos\t=\t[0,0,0]\n#coordOrnEuler = [0,0,0]\n\n#coordPos= [0.7000000000000004, 0, 0.22000000000000006]\n#coordOrnEuler= [0, -0.2617993877991496, 0]\n\ncoordPos = [0.07, 0, -0.6900000000000004]\ncoordOrnEuler = [0, 0, 0]\n\ncoordPos2 = [0, 0, 0]\ncoordOrnEuler2 = [0, 0, 0]\n\ninvPos, invOrn = p.invertTransform(coordPos, p.getQuaternionFromEuler(coordOrnEuler))\nmPos, mOrn = p.multiplyTransforms(invPos, invOrn, coordPos2,\n p.getQuaternionFromEuler(coordOrnEuler2))\neul = p.getEulerFromQuaternion(mOrn)\nprint(\"rpy=\\\"\", eul[0], eul[1], eul[2], \"\\\" xyz=\\\"\", mPos[0], mPos[1], mPos[2])\n\nshift = 0\ngui = 1\n\nframe = 0\nstates = []\nstates.append(p.saveState())\n#observations=[]\n#observations.append(obs)\n\nrunning = True\nreverting = False\np.getCameraImage(320, 200) #,renderer=p.ER_BULLET_HARDWARE_OPENGL )\n\nwhile (1):\n\n updateCam = 0\n keys = p.getKeyboardEvents()\n for k, v in keys.items():\n if (reverting or (k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_TRIGGERED))):\n reverting = True\n stateIndex = len(states) - 1\n #print(\"prestateIndex=\",stateIndex)\n time.sleep(timestep)\n updateCam = 1\n if (stateIndex > 0):\n stateIndex -= 1\n states = states[:stateIndex + 1]\n #observations=observations[:stateIndex+1]\n\n #print(\"states=\",states)\n #print(\"stateIndex =\",stateIndex )\n p.restoreState(states[stateIndex])\n #obs=observations[stateIndex]\n\n #obs, r, done, _ = env.step(a)\n if (k == p.B3G_LEFT_ARROW and (v & p.KEY_WAS_RELEASED)):\n reverting = False\n\n if (k == ord('1') and (v & p.KEY_WAS_TRIGGERED)):\n gui = not gui\n\n if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_WAS_RELEASED)):\n running = False\n\n if (running or (k == p.B3G_RIGHT_ARROW and (v & p.KEY_WAS_TRIGGERED))):\n running = True\n\n if (running):\n\n p.stepSimulation()\n\n updateCam = 1\n time.sleep(timestep)\n pts = p.getContactPoints()\n #print(\"numPoints=\",len(pts))\n #for point in pts:\n #\tprint(\"Point:bodyA=\", point[1],\"bodyB=\",point[2],\"linkA=\",point[3],\"linkB=\",point[4],\"dist=\",point[8],\"force=\",point[9])\n\n states.append(p.saveState())\n #observations.append(obs)\n stateIndex = len(states)\n #print(\"stateIndex =\",stateIndex )\n frame += 1\n if (updateCam):\n 
distance = 5\n yaw = 0\n humanPos, humanOrn = p.getBasePositionAndOrientation(bike)\n humanBaseVel = p.getBaseVelocity(bike)\n #print(\"frame\",frame, \"humanPos=\",humanPos, \"humanVel=\",humanBaseVel)\n if (gui):\n\n camInfo = p.getDebugVisualizerCamera()\n curTargetPos = camInfo[11]\n distance = camInfo[10]\n yaw = camInfo[8]\n pitch = camInfo[9]\n targetPos = [\n 0.95 * curTargetPos[0] + 0.05 * humanPos[0], 0.95 * curTargetPos[1] + 0.05 * humanPos[1],\n curTargetPos[2]\n ]\n p.resetDebugVisualizerCamera(distance, yaw, pitch, targetPos)\n","repo_name":"WolfireGames/overgrowth","sub_path":"Projects/bullet3-2.89/examples/pybullet/gym/pybullet_envs/examples/testBike.py","file_name":"testBike.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":2304,"dataset":"github-code","pt":"21"} +{"seq_id":"4929522153","text":"\nimport math\n\n\ndef express():\n\n # 4**3 = 64 possible sequences\n \n # yes: \n\n # ppp, nnn, ddd, qqq\n\n # pnn\n # pdd, ndd\n # pqq, nqq, dqq\n\n # no: \n\n # pnd, pnq, dnq\n # \n \n\n \n pass\n\n\n\n\ndef check_game(n):\n\n # 200 games, 800 at bats\n # 800 * .2 = 160\n\n at_bats = n * 4\n\n nearby = at_bats * (n / 1000.0)\n\n below = int(math.floor(nearby))\n above = int(math.ceil(nearby))\n\n #if n == 249:\n # import pdb; pdb.set_trace()\n\n\n if int(round(1000 * below / at_bats)) == n:\n return True\n\n if int(round(1000 * above / at_bats)) == n:\n return True\n\n return False\n\n\n\ndef classic():\n\n impossible_games = [e for e in range(1, 1001) if not check_game(e)]\n \n return impossible_games\n\n\n\ndef main():\n print(classic())\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"chirs/puzzles","sub_path":"riddler/2021/03.05.py","file_name":"03.05.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26136624043","text":"import matplotlib.pyplot as plt \nimport matplotlib.image as mgimg\nfrom matplotlib import animation\nimport os\nn_imagenes=os.listdir('figuras/')\nframes = []\nfor i in range (len(n_imagenes)):\n frames.append('figuras/'+str(i)+'.png')\n\n\n\n\nfig = plt.figure()\n\nmyimages = []\nplt.axis('off')\nfor p in frames:\n fname = p\n img = mgimg.imread(fname)\n imgplot = plt.imshow(img)\n myimages.append([imgplot])\n\n\nmy_anim = animation.ArtistAnimation(fig, myimages, interval=200, blit=True, repeat_delay=1000)\n\n\nmy_anim.save(\"gaus_seidel_v30_a10.mp4\")\n\n","repo_name":"andersonruales123/CursoFCII-2020-1","sub_path":"Documentos/Proyectofinal/CC1143966473/proyecto_final/archivos_Gauss_seidel/gif.py","file_name":"gif.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"72439825972","text":"# Dictionaries\n# https://automatetheboringstuff.com/chapter5/\n\n\nstuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\n\ndef displayInventory(inventory):\n print(\"Inventory:\")\n item_total = 0\n for k, v in inventory.items():\n # FILL IN THE CODE HERE\n print(k, \": \", v)\n item_total += v\n print(\"Total number of items: \" + str(item_total))\n\n#displayInventory(stuff)\n\ndef addToInventory(inventory, addedItems):\n # your code goes here\n print(\"before: \", inventory)\n for i in addedItems:\n if i in inventory:\n inventory[i] = inventory[i] + 1\n else:\n inventory.setdefault(i, 1)\n print(\"after: \", inventory)\n return inventory\n\ninv = {'gold coin': 42, 'rope': 
1}\ndragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\ninv = addToInventory(inv, dragonLoot)\ndisplayInventory(inv)\n","repo_name":"hhhoang/100DaysOfCode_HH","sub_path":"Day12.py","file_name":"Day12.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24455648502","text":"import pandas as pd\nimport seaborn as sns\nimport sklearn as sk\nimport matplotlib.pyplot as plt\nsns.set()\n\ndata = pd.read_csv('../data/linear-regression/real_estate_price_size.csv')\nfrom sklearn.linear_model import LinearRegression\n\nprint(data.head())\n\nx = data['size']\ny = data['price']\n\nx_matrix = x.values.reshape(-1,1)\n\nreg = LinearRegression()\n\nreg.fit(x_matrix, y)\n\n#1. R Square value\nprint(reg.score(x_matrix, y))\n\n#2. Co-efficient\nprint(reg.coef_)\n\n#3. Intercept\nprint(reg.intercept_)\n\n#4. Predict Price\nnew_data = pd.DataFrame(data=[789, 890], columns=['size'])\nnew_data['Predicted_Price'] = reg.predict(new_data)\n\nprint(new_data)\nplt.scatter(x,y)\nyhat = reg.coef_ * x_matrix + reg.intercept_\nfig = plt.plot(x, yhat, lw=2, c='red', label='regression line')\nplt.xlabel('size', fontsize=15)\nplt.ylabel('price', fontsize=15)\nplt.show()","repo_name":"tulans/LearnDataScience","sub_path":"RegressionWithSKLearn/LinearRegressionSkLearnRealEstate.py","file_name":"LinearRegressionSkLearnRealEstate.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25945432461","text":"from fakts.grants.base import FaktsGrant\nfrom fakts.grants.errors import GrantError\nimport os\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef nested_set(dic, keys, value):\n for key in keys[:-1]:\n dic = dic.setdefault(key, {})\n dic[keys[-1]] = value\n\n\nclass EnvGrant(FaktsGrant):\n \"\"\"Extras a configuration tree from the current environment\n\n Example:\n ```env\n FAKTS__GROUP_NAME__KEY_NAME=value\n ```\n ```python\n grant = EnvGrant()\n config = await grant.load()\n print(config[\"group_name\"][\"key_name\"]) # value\n ```\n\n \"\"\"\n\n prepend: str = \"FAKTS_\"\n delimiter: str = \"__\"\n\n async def aload(self, **kwargs):\n\n try:\n data = {}\n\n for key, value in os.environ.items():\n if self.prepend:\n if not key.startswith(self.prepend):\n continue\n key = key[len(self.prepend) :]\n\n path = list(map(lambda x: x.lower(), key.split(self.delimiter)))\n nested_set(data, path, value)\n\n return data\n\n except Exception as e:\n raise GrantError(f\"Could not load from env: {e}\") from e\n","repo_name":"jhnnsrs/fakts","sub_path":"fakts/grants/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19208303165","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport sys\n\nglobal x1,y1, win, view_w, view_h, wo, ho\n\ndef Desenha():\n global x1,y1, win, view_w, view_h\n \n glClearColor(0.0, 0.0, 0.0, 0.0) # Define o fundo preto\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n# Desenha Poligono branco\n glClear(GL_COLOR_BUFFER_BIT)\n glBegin(GL_POLYGON)\n glColor3f(255.0,255.0,255.0)\n glVertex2f(-0.4+x1, 0.3+y1)\n glVertex2f(-0.4+x1, -0.3+y1)\n glVertex2f(0.4+x1, -0.3+y1)\n glVertex2f(0.4+x1, 0.3+y1)\n glEnd()\n\n#Define cor da estrela\n glColor3f(0.0, 0.0, 255.0)\n\n#### --- Começo da estrela\n\n glBegin(GL_TRIANGLES)\n 
glVertex2f(-0.08+x1, 0.0+y1)\n glVertex2f(0.0+x1, -0.08+y1)\n glVertex2f(0.08+x1, 0.0+y1)\n glEnd()\n\n#DesenhaTriangulo de cima\n\n glBegin(GL_TRIANGLES)\n glVertex2f(-0.03+x1, 0.0+y1)\n glVertex2f(0.03+x1, 0.0+y1)\n glVertex2f(0.0+x1, 0.06+y1)\n glEnd()\n\n#Desenha Triangulo de baixo -- esquerda\n glBegin(GL_TRIANGLES)\n glVertex2f(0.0+x1, -0.08+y1)\n glVertex2f(-0.05+x1, -0.13+y1)\n glVertex2f(-0.03+x1, -0.01+y1)\n glEnd()\n\n#Desenha Triangulo de baixo -- direita\n\n glBegin(GL_TRIANGLES)\n glVertex2f(0.0+x1, -0.08+y1)\n glVertex2f(0.05+x1, -0.13+y1)\n glVertex2f(0.03+x1, -0.01+y1)\n glEnd()\n\n#### --- Final da estrela\n\n# Desenha Triangulo Vermelho de cima\n\n glBegin(GL_TRIANGLES)\n glColor3f(255.0, 0.0, 0.0)\n glVertex2f(-0.2+x1, 0.3+y1)\n glVertex2f(0.4+x1, 0.3+y1)\n glVertex2f(0.4+x1, -0.1+y1)\n glEnd()\n\n\n\n# Desenha Triangulo Vermelho de baixo\n glBegin(GL_TRIANGLES)\n glVertex2f(-0.4+x1, 0.0+y1)\n glVertex2f(-0.4+x1, -0.3+y1)\n glVertex2f(0.2+x1, -0.3+y1)\n glEnd()\n glutSwapBuffers()\n\n\ndef Inicializa():\n global x1,y1, win\n glClearColor(0.0,0.0,0.0,1.0)\n x1=1.0\n y1=1.0\n win=250.0\n\ndef AlterandoTamanhoJanela(w,h):\n global x1,y1, win, view_w, view_h, wo, ho\n\n glViewport(0,0,w,h)\n view_w = w\n view_h = h\n wo = w\n ho = h\n print(w,h)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n\n if (w <= h):\n view_h = 4.0*h/w\n view_w = 2.0\n else:\n view_w = 4.0*h/w\n view_h = 2.0\n \n gluOrtho2D(0.0, view_w, 0.0, view_h)\n\ndef GerenciaTeclado(key, x, y):\n global x1, y1, win, view_w, view_h,wo,ho\n if(key==b'R' or key ==b'r'):\n glColor3f(1.0, 0.0, 0.0)\n elif (key==b'G' or key ==b'g'):\n glColor3f(0.0, 1.0, 0.0)\n elif (key==b'B' or key ==b'b'):\n glColor3f(0.0, 0.0, 1.0)\n print (key)\n glutPostRedisplay()\n \n# Função callback chamada para gerenciar eventos do mouse\ndef GerenciaMouse(button, state, x, y):\n global x1, y1, win, view_w, view_h\n if (button == GLUT_LEFT_BUTTON):\n if (state == GLUT_DOWN):\n # Troca o tamanho do retângulo, que vai do centro da \n # janela até a posição onde o usuário clicou com o mouse\n escalax=view_w/wo\n x1 = x*escalax\n escalay=view_h/ho\n y1 = (ho-y)*escalay\n glutPostRedisplay()\n\n#// Função callback chamada para gerenciar eventos do teclado \n#// para teclas especiais, tais como F1, PgDn e Home\n\ndef TeclasEspeciais(key, x, y):\n global x1, y1, win, view_w, view_h\n if(key == GLUT_KEY_UP):\n y1=y1+0.1\n if (y1+0.3>view_h):\n y1=view_h-0.3\n \n if(key == GLUT_KEY_DOWN):\n y1=y1-0.2\n if (y1-0.3<0):\n y1=0.3\n \n if(key == GLUT_KEY_LEFT):\n x1=x1-0.1\n if (x1-0.4<0):\n x1=0.4\n \n if(key == GLUT_KEY_RIGHT):\n x1=x1+0.1\n if (x1+0.4>view_w):\n x1=view_w-0.4\n\n glutPostRedisplay()\n\n#// Programa Principal \ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB)\n glutInitWindowSize(500,350)\n glutInitWindowPosition(10,10)\n glutCreateWindow(b\"Interacao\")\n glutDisplayFunc(Desenha)\n glutReshapeFunc(AlterandoTamanhoJanela)\n glutKeyboardFunc(GerenciaTeclado)\n glutMouseFunc(GerenciaMouse)\n glutSpecialFunc(TeclasEspeciais)\n Inicializa()\n glutMainLoop()\n\nmain()\n","repo_name":"amandae17/OpenGL","sub_path":"Bandeira Interacao.py","file_name":"Bandeira Interacao.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27539951305","text":"from datetime import datetime\n\nfrom vnpy.trader.engine import BaseEngine, MainEngine, EventEngine\nfrom vnpy.trader.constant import Interval, Exchange\nfrom 
vnpy_datamanager.engine import ManagerEngine\n\nfrom vnpy.trader.utility import load_json\nfrom threading import Thread\n\n\nclass DataManager(ManagerEngine):\n def __init__(self, main_engine=None, event_engine=None):\n if main_engine is None:\n event_engine = EventEngine()\n main_engine = MainEngine(event_engine)\n\n super().__init__(main_engine, event_engine)\n self.main_engine.event_engine.register(\"eLog\", lambda e: print(e.data))\n\n def connect(self, gateway_name: str, setting, gateway_class=None) -> None:\n gateway = self.main_engine.gateways.get(gateway_name, None)\n if not gateway:\n self.main_engine.add_gateway(gateway_class, gateway_name)\n\n if type(setting) is str:\n setting = load_json(setting)\n # setting['代理地址'] = '127.0.0.1'\n # setting['代理端口'] = '20171'\n\n self.main_engine.connect(setting, gateway_name)\n\n def download_bar_data(\n self,\n symbol: str,\n exchange: Exchange,\n interval: str,\n start: datetime,\n ) -> bool:\n\n self.thread = Thread(\n target=super().download_bar_data,\n args=(\n symbol,\n exchange,\n interval,\n start,\n )\n )\n self.thread.start()\n return True","repo_name":"cloudseasail/vnpy_cryptostrategy","sub_path":"vnpy_cryptostrategy/utils/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"38282568528","text":"'''\nDesenvolva um programa que leia nome, idade e sexo de 4 pessoas.\nNo final do programa, mostre:\n- A média de idade do grupo.\n- Qual é o nome do homem mais velho.\n- Quantas mulheres têm menos de 20 anos.\n'''\ncount = 0\ntotalagef = 0\ntotalagem = 0\ngirlscount = 0\noldmanage = 0\nfor i in ('1st', '2nd', '3rd', '4th'):\n count += 1 # Just to insert more loops\n while True:\n print('----- {} person -----'.format(i))\n name = input('Name: ').strip().split()\n name = name[0]\n\n try:\n age = int(input('Age: ').strip())\n except:\n print('Invalid value!')\n continue\n\n sex = input('Sex [M/F]: ').strip().upper()\n if sex == 'F':\n totalagef += age\n if age < 20:\n girlscount += 1\n break\n elif sex == 'M':\n totalagem += age\n if age > oldmanage:\n oldmanage = age\n oldmanname = name\n break\n else:\n print('Invalid value!')\n continue\n\nprint('The average of persons age is {:.2f}.'.format((totalagef + totalagem)/count))\nif totalagem > 0:\n print('The elder man is {}, he is {} years old.'.format(oldmanname, oldmanage))\nprint('There are {} girls with less than 20 years.'.format(girlscount))","repo_name":"marcosdemelo00/Python-Exercises-CursoemVideo","sub_path":"World 2 - ex036 to ex071/ex056 - Analisador Completo (Complete Analyzer).py","file_name":"ex056 - Analisador Completo (Complete Analyzer).py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37448192483","text":"import asyncio\nimport datetime\nimport functools\nimport io\nimport os\nimport shutil\nimport socket\nimport time\nimport zipfile\n\nimport aiohttp\nimport discord\n\nfrom urllib.parse import urlparse\nfrom psycopg2.extras import Json\n\nfrom jshbot import data, configurations, core, logger\nfrom jshbot.exceptions import BotException, ConfiguredBotException\n\n\nCBException = ConfiguredBotException('Utilities')\n\n\n# Voice region time offsets (no DST)\nVOICE_REGIONS = {\n 'us-west': -8,\n 'us-east': -5,\n 'us-south': -6,\n 'us-central': -6,\n 'eu-west': 1, # West European Summer Time\n 'eu-central': 2, # Central European 
Summer Time\n 'singapore': 8,\n 'london': 0,\n 'sydney': 10,\n 'amsterdam': 2, # CEST\n 'frankfurt': 2, # CEST\n 'brazil': -3,\n 'vip-us-east': -5,\n 'vip-us-west': -8,\n 'vip-amsterdam': 2 # CEST\n}\n\n# Integer to emoji conversion\nNUMBER_EMOJIS = [\n ':zero:', ':one:', ':two:', ':three:', ':four:', ':five:',\n ':six:', ':seven:', ':eight:', ':nine:', ':keycap_ten:'\n]\n\n\nclass BaseConverter():\n def __init__(self):\n self.error_reason = \"Unknown conversion error\"\n def get_convert_error(self, *args):\n return self.error_reason\n\n\nclass MemberConverter(BaseConverter):\n def __init__(self, server_only=True, live_check=None, attribute=None):\n self.server_only = server_only\n self.live_check = live_check\n self.attribute = attribute\n super().__init__()\n async def __call__(self, bot, message, value, *a):\n if self.live_check:\n self.server_only = self.live_check(bot, message, value, *a)\n guild = message.guild if self.server_only else None\n try:\n return await data.fetch_member(\n bot, value, guild=guild, strict=self.server_only, attribute=self.attribute)\n except BotException as e:\n self.set_error_reason(e, 'member')\n def set_error_reason(self, error, convert_type):\n if error.error_details.startswith('Duplicate'):\n pre_format = \"Duplicate {}s found.\".format(convert_type)\n else:\n pre_format = \"{} '{}' not found.\".format(convert_type.title(), error.other_details)\n self.error_reason = pre_format + ' Please use a mention or raw user ID.'\n assert False # To trigger the conversion error\n\n\nclass ChannelConverter(MemberConverter):\n def __init__(self, server_only=True, live_check=None, constraint=None, attribute=None):\n \"\"\"Constraint can be used to specify only text or voice channels.\n\n The constraint can either be discord.VoiceChannel or discord.TextChannel\n \"\"\"\n self.server_only = server_only\n self.constraint = constraint\n super().__init__(server_only=server_only, live_check=live_check, attribute=attribute)\n def __call__(self, bot, message, value, *a):\n if self.live_check:\n guild = self.live_check(bot, message, value, *a)\n else:\n guild = message.guild if self.server_only else None\n try:\n return data.get_channel(\n bot, value, guild=guild, strict=self.server_only,\n constraint=self.constraint, attribute=self.attribute)\n except BotException as e:\n self.set_error_reason(e, 'channel')\n\n\nclass RoleConverter(MemberConverter):\n def __init__(self, attribute=None):\n super().__init__(attribute=attribute)\n def __call__(self, bot, message, value, *a):\n try:\n return data.get_role(bot, value, message.guild, attribute=self.attribute)\n except BotException as e:\n self.set_error_reason(e, 'role')\n\n\nclass PercentageConverter(BaseConverter):\n def __init__(self, accuracy=3):\n self.accuracy = int(accuracy)\n super().__init__()\n def __call__(self, bot, message, value, *a):\n cleaned = value.strip('%')\n try:\n converted = float(cleaned)\n except:\n raise CBException(\"Must be a percentage.\")\n else:\n if self.accuracy is not None:\n converted = round(converted, self.accuracy)\n return converted/100\n\n\nclass HexColorConverter(BaseConverter):\n def __call__(self, bot, message, value, *a):\n try:\n return discord.Color(int(value.lower()[-6:], 16))\n except:\n raise CBException(\"Invalid hex color.\")\n\n\ndef add_bot_permissions(bot, plugin_name, **permissions):\n \"\"\"Adds the given permissions to the bot for authentication generation.\"\"\"\n dummy = discord.Permissions()\n for permission in permissions:\n try:\n getattr(dummy, permission.lower())\n except: # Permission not 
found\n raise CBException(\"Permission '{}' does not exist\".format(permission))\n current = data.get(\n bot, plugin_name, 'permissions', create=True, volatile=True)\n if current is None:\n data.add(bot, plugin_name, 'permissions', permissions, volatile=True)\n\n\ndef get_permission_bits(bot):\n \"\"\"Calculates all of the permissions for each plugin.\"\"\"\n dummy = discord.Permissions()\n for plugin in bot.plugins.keys():\n for permission in data.get(\n bot, plugin, 'permissions', volatile=True, default={}):\n setattr(dummy, permission.lower(), True)\n return dummy.value\n\n\nasync def can_interact(bot, member, channel_id=None):\n \"\"\"Checks that the given member can be interacted with.\n\n This ensures that the user is:\n Not a bot\n Not blocked in the server\n\n Additionally, if the user is a member (guild exists):\n Not in a blocked channel\n Not blacklisted by the botowners\n\n If given a channel ID, also checks that the bot is not muted in there\n\n This also checks for maintenace mode\n \"\"\"\n if data.is_owner(bot, member.id):\n return True\n elif member.bot or member.id in data.get(bot, 'core', 'blacklist', default=[]):\n return False\n elif bot.maintenance_mode:\n return False\n\n # Guild specific check\n guild = getattr(member, 'guild', None)\n if guild:\n if data.is_mod(bot, member=member):\n return True\n guild_data = data.get(bot, 'core', None, guild.id, default={})\n if (guild_data.get('muted', False) or\n (channel_id in guild_data.get('muted_channels', [])) or\n (member.id in guild_data.get('blocked', []))):\n return False\n\n return True\n\n\nasync def download_url(\n bot, url, headers={'User-Agent': 'Mozilla/5.0'},\n include_name=False, extension=None, filename=None, use_fp=False):\n \"\"\"Asynchronously downloads the given file to the temp folder.\n\n Returns the path of the downloaded file. 
If include_name is True, returns\n a tuple of the file location and the file name.\n\n If use_fp, this will use a BytesIO object instead of downloading to a file.\n \"\"\"\n if use_fp:\n fp = io.BytesIO()\n else:\n if not filename:\n filename = get_cleaned_filename(url, extension=extension)\n file_location = '{0}/temp/{1}'.format(bot.path, filename)\n try:\n response_code, downloaded_bytes = await get_url(bot, url, get_bytes=True, headers=headers)\n if response_code != 200:\n raise CBException(\"Failed to download file.\", response_code)\n if use_fp:\n fp.write(downloaded_bytes)\n fp.seek(0)\n return fp\n else:\n with open(file_location, 'wb') as download:\n download.write(downloaded_bytes)\n if include_name:\n return (file_location, filename)\n else:\n return file_location\n except Exception as e:\n raise CBException(\"Failed to download the file.\", e=e)\n\n\ndef delete_temporary_file(bot, filename, safe=True):\n \"\"\"Deletes the given file from the temp folder.\"\"\"\n try:\n os.remove('{0}/temp/{1}'.format(bot.path, filename))\n except Exception as e:\n if not safe:\n raise CBException(\"File could not be deleted.\", e=e)\n\n\ndef get_temporary_file(bot, filename, safe=True):\n \"\"\"Gets the filename from the temp folder.\"\"\"\n test_path = '{0}/temp/{1}'.format(bot.path, filename)\n if os.path.isfile(test_path):\n return test_path\n elif safe:\n return None\n else:\n raise CBException(\"Temporary file not found.\")\n\n\ndef add_temporary_file(bot, bytes_io, filename, seek=True, overwrite=True, safe=False):\n \"\"\"Dumps the binary file into the temp folder.\"\"\"\n test_path = '{0}/temp/{1}'.format(bot.path, filename)\n if os.path.isfile(test_path) and not overwrite and not safe:\n raise CBException(\"Temporary file already exists.\")\n else:\n try:\n if seek and bytes_io.seekable():\n bytes_io.seek(0)\n write_type = 'w' if isinstance(bytes_io, io.StringIO) else 'wb'\n with open(test_path, write_type) as temp_file:\n temp_file.write(bytes_io.read())\n except Exception as e:\n if not safe:\n raise CBException(\"Failed to write temporary file.\", e=e)\n\n\ndef get_plugin_file(bot, filename, safe=True):\n \"\"\"Gets the plugin file in the plugin_data directory.\"\"\"\n test_path = '{0}/plugins/plugin_data/{1}'.format(bot.path, filename)\n if os.path.isfile(test_path):\n return test_path\n elif safe:\n return None\n else:\n raise CBException(\"Plugin file '{}' not found.\".format(filename))\n\n\ndef valid_url(url):\n \"\"\"Checks that the given URL is Discord embed friendly. Or at least, it tries.\"\"\"\n\n def _valid_string(segment, main=True):\n if not len(segment):\n return False\n for c in [ord(it.lower()) for it in segment]:\n if not (97 <= c <= 122 or (main and (48 <= c <= 57 or c == 45))):\n return False\n return True\n\n test = urlparse(url)\n if not (test.scheme and test.netloc and '.' 
in test.netloc):\n return False\n\n # Discord only accepts http or https\n if test.scheme not in ('http', 'https'):\n return False\n\n # Test for valid netloc\n netloc_split = test.netloc.split('.')\n if (len(netloc_split) < 2):\n return False # http://foo\n tld = test.netloc.split('.')[-1]\n if not (len(tld) >= 2 and _valid_string(tld, main=False)):\n return False # http://foo.123\n for segment in netloc_split[:-1]:\n if not _valid_string(segment):\n return False # http://foo..bar or http://fo*o.bar\n for c in url:\n if not 33 <= ord(c) <= 126:\n return False # non-ASCII only URLs\n return True\n\n\nasync def get_url(bot, urls, headers={}, read_response=True, get_bytes=False):\n \"\"\"Uses aiohttp to asynchronously get a url response, or multiple.\"\"\"\n\n async def fetch(url, read_method='text'):\n if not url: # Why\n return (None, None)\n async with session.get(str(url)) as response:\n return (\n response.status,\n (await getattr(response, read_method)()) if read_response else response)\n\n read_method = 'read' if get_bytes else 'text'\n try:\n async with aiohttp.ClientSession(headers=headers, loop=bot.loop) as session:\n if isinstance(urls, (list, tuple)):\n result = await parallelize(fetch(url, read_method) for url in urls)\n else:\n result = await fetch(urls, read_method)\n return result\n except Exception as e:\n raise CBException(\"Failed to retrieve a URL.\", e=e)\n\n\nasync def request(\n bot, method, url, session_kwargs={}, method_kwargs={},\n response_method='text', response_method_kwargs={}):\n \"\"\"Wraps aiohttp methods for making a request.\"\"\"\n async with aiohttp.ClientSession(**session_kwargs) as session:\n async with getattr(session, method)(url, **method_kwargs) as response:\n return (response, await getattr(response, response_method)(**response_method_kwargs))\n\n\nasync def upload_to_discord(bot, fp, filename=None, rewind=True, close=False):\n \"\"\"Uploads the given file-like object to the upload channel.\n\n If the upload channel is specified in the configuration files, files\n will be uploaded there. Otherwise, a new guild will be created, and\n used as the upload channel.\"\"\"\n channel_id = configurations.get(bot, 'core', 'upload_channel')\n if not channel_id: # Check to see if a guild was already created\n channel_id = data.get(bot, 'core', 'upload_channel')\n channel = data.get_channel(bot, channel_id, safe=True)\n\n # TODO: Remove. Guild creation via bots is a whitelisted process\n if channel is None: # Create guild\n logger.debug(\"Creating guild for upload channel...\")\n try:\n guild = await bot.create_guild('uploads')\n except Exception as e:\n raise CBException(\n \"Failed to create upload guild. 
This bot is not whitelisted \"\n \"to create guilds.\", e=e)\n data.add(bot, 'core', 'upload_channel', guild.id)\n channel = bot.get_channel(guild.id)\n\n if channel is None: # Shouldn't happen\n raise CBException(\"Failed to get upload channel.\")\n\n try:\n discord_file = discord.File(fp, filename=filename)\n message = await channel.send(file=discord_file)\n upload_url = message.attachments[0].url\n except Exception as e:\n raise CBException(\"Failed to upload file.\", e=e)\n\n try:\n if close:\n fp.close()\n elif rewind:\n fp.seek(0)\n except:\n pass\n\n return upload_url\n\n\nasync def upload_logs(bot):\n \"\"\"Uploads any log files to the debug channel.\"\"\"\n log_zip_location = '{0}/temp/debug_log_files.zip'.format(bot.path)\n log_zip_file = zipfile.ZipFile(log_zip_location, mode='w')\n log_location = '{0}/temp/debug_logs.txt'.format(bot.path)\n compression = zipfile.ZIP_DEFLATED\n if os.path.exists(log_location):\n log_zip_file.write(\n log_location, arcname=os.path.basename(log_location),\n compress_type=compression)\n for log_number in range(5):\n next_location = log_location + '.{}'.format(log_number + 1)\n if os.path.exists(next_location):\n log_zip_file.write(\n next_location, arcname=os.path.basename(next_location),\n compress_type=compression)\n log_zip_file.close()\n\n debug_channel = bot.get_channel(configurations.get(bot, 'core', 'debug_channel'))\n discord_file = discord.File(log_zip_location, filename='all_logs.zip')\n await debug_channel.send(content='All logs:', file=discord_file)\n\n\nasync def parallelize(coroutines, return_exceptions=False, propagate_error=False):\n \"\"\"Uses asyncio.gather to \"parallelize\" the coroutines (not really).\"\"\"\n try:\n return await asyncio.gather(*coroutines, return_exceptions=return_exceptions)\n except Exception as e:\n if propagate_error:\n raise e\n else:\n raise CBException(\"Failed to await coroutines.\", e=e)\n\n\ndef future(function, *args, **kwargs):\n \"\"\"Returns the given function as a future.\"\"\"\n loop = asyncio.get_event_loop()\n function = functools.partial(function, *args, **kwargs)\n return loop.run_in_executor(None, function)\n\n\n# TODO: Deprecate in favor of clean_text\ndef get_cleaned_filename(name, cleaner=False, limit=200, extension=None):\n \"\"\"Cleans up the filename to a limited set of ASCII characters.\"\"\"\n if extension:\n extension = '.{}'.format(extension)\n limit -= len(extension)\n else:\n extension = ''\n cleaned_list = []\n for char in name:\n if cleaner: # Does not include underscores or dashes\n if char.isalnum():\n cleaned_list.append(char)\n else:\n if char.isalnum() or ord(char) in (95, 45):\n cleaned_list.append(char)\n if len(cleaned_list) > limit: # Because Windows file limitations\n cleaned_list = cleaned_list[:limit]\n return ''.join(cleaned_list).lower() + extension\n\n\ndef clean_text(text, level=2, limit=200, custom=None, lowercase=True):\n \"\"\"Cleans up the text to a limited set of ASCII characters.\n\n level 0: Standard ASCII characters or alphanumeric unicode\n level 1: Alphanumeric (unicode) or dash, underscore, space\n level 2: Alphanumeric (unicode) or dash, underscore (default)\n level 3: Alphanumeric (unicode) only\n level 4: Alphanumeric (ASCII) only\n \"\"\"\n if custom:\n sifter = custom\n else:\n sifter = (\n lambda x: x if (x.isalnum() or 32 <= ord(x) <= 126) else '',\n lambda x: x if (x.isalnum() or ord(x) in (95, 45, 32)) else '',\n lambda x: x if (x.isalnum() or ord(x) in (95, 45)) else '',\n lambda x: x if x.isalnum() else '',\n lambda x: x if (x.isalnum() 
and ord(x) < 127) else ''\n )[level]\n cleaned = ''.join(sifter(char) for char in text[:limit])\n return cleaned.lower() if lowercase else cleaned\n\n\ndef filter_everyone(text):\n \"\"\"Removes mentionable instances of @everyone and @here.\"\"\"\n return text.replace('@everyone', '@\\u200beveryone').replace('@here', '@\\u200bhere')\n\n\ndef get_player(bot, guild_id):\n \"\"\"Gets the voice player on the given guild. None otherwise.\"\"\"\n return data.get(bot, 'core', 'voice_player', guild_id=guild_id, volatile=True)\n\n\ndef set_player(bot, guild_id, player):\n \"\"\"Sets the voice player of the given guild.\"\"\"\n data.add(bot, 'core', 'voice_player', player, guild_id=guild_id, volatile=True)\n\n\nasync def join_and_ready(bot, voice_channel, is_mod=False, reconnect=False):\n \"\"\"Joins the voice channel and stops any audio playing.\n\n Returns the voice_client object from voice_channel.connect()\n \"\"\"\n guild = voice_channel.guild\n muted_channels = data.get(bot, 'core', 'muted_channels', guild_id=guild.id, default=[])\n if voice_channel == guild.afk_channel:\n raise CBException(\"This is the AFK channel.\")\n if voice_channel.id in muted_channels and not is_mod:\n raise CBException(\"The bot is muted in this voice channel.\")\n if reconnect:\n try:\n await stop_audio(bot, guild)\n except:\n pass\n\n voice_client = guild.voice_client\n if not voice_client:\n try:\n voice_client = await asyncio.wait_for(\n voice_channel.connect(timeout=5.0, reconnect=False),\n timeout=10.0, loop=bot.loop)\n except asyncio.TimeoutError as e:\n try:\n await stop_audio(bot, guild, force=True)\n except:\n pass\n raise CBException(\"Timed out trying to join the voice channel.\")\n except Exception as e:\n try:\n await stop_audio(bot, guild)\n except:\n pass\n raise CBException(\"Failed to join the voice channel.\", e=e)\n if voice_client.is_playing():\n voice_client.stop()\n else:\n if voice_client.is_playing():\n voice_client.stop()\n if voice_client.channel != voice_channel:\n try:\n await voice_client.move_to(voice_channel)\n except Exception as e:\n try:\n await stop_audio(bot, guild)\n except:\n pass\n raise CBException(\"Failed to move to the voice channel.\", e=e)\n\n return voice_client\n\n\nasync def stop_audio(bot, guild, member=None, safe=True, disconnect=True, force=False):\n \"\"\"Stops any playing audio.\n\n Keyword arguments:\n member -- Checks that the the bot is connected to the member's\n voice channel. The safe option overrides this.\n safe -- Prevents exceptions from being thrown. 
Can be seen as 'silent'.\n disconnect -- Disconnects from the voice channel.\n force -- If disconnect is set, forces the disconnect.\n \"\"\"\n voice_client = guild.voice_client\n if not voice_client:\n if safe:\n return\n else:\n raise CBException(\"Bot not connected to a voice channel.\")\n member_voice = member.voice.channel if member and member.voice else None\n if member and voice_client.channel != member_voice:\n if not safe:\n raise CBException(\"Bot not connected to your voice channel.\")\n else:\n voice_client.stop()\n if disconnect:\n await voice_client.disconnect(force=force)\n\n\nasync def play_and_leave(bot, guild, audio_source, delay=30):\n \"\"\"Plays the audio source, and then leaves the voice channel.\n\n If the delay is negative, the bot will not leave the voice channel.\n \"\"\"\n voice_client = guild.voice_client\n if voice_client is None:\n raise CBException(\"Voice client is missing.\")\n\n async def _leave():\n await asyncio.sleep(delay)\n test_voice_client = guild.voice_client\n if not test_voice_client or test_voice_client.source != audio_source:\n logger.debug(\"Voice client changed. Automatic disconnect cancelled.\")\n else:\n try:\n await voice_client.disconnect()\n except Exception as e:\n raise CBException(\"Failed to disconnect from the voice channel.\", e=e)\n\n def _start_leave(error):\n if error:\n raise CBException(\"Player failed to finish.\", error)\n elif delay >= 0:\n asyncio.ensure_future(_leave(), loop=bot.loop)\n\n voice_client.play(audio_source, after=_start_leave)\n\n\ndef get_time_string(total_seconds, text=False, full=False, resolution=2):\n \"\"\"Gets either digital-clock-like time or time in plain English.\"\"\"\n total_seconds = int(total_seconds)\n values = [\n #('weeks', int(total_seconds / 604800)), # Weeks are more confusing than days\n ('days', int(total_seconds / 86400)),\n ('hours', int((total_seconds % 86400) / 3600)),\n ('minutes', int((total_seconds % 3600) / 60)),\n ('seconds', int(total_seconds % 60))\n ]\n result = []\n\n if text:\n for scale, value in values:\n if value > 0:\n if not full and len(result) == 1 and values[0][1] >= 7:\n break # Lower resolution if there are several days already\n result.append('{} {}{}'.format(\n value, scale[:-1], '' if value == 1 else 's'))\n if not full and len(result) >= resolution:\n break\n for it in range(len(result) - 2):\n result.insert((it * 2) + 1, ', ')\n if len(result) > 1:\n result.insert(-1, ' and ')\n\n else:\n for scale, value in values:\n if value > 0 or full or scale == 'minutes':\n if scale in ('hours', 'minutes', 'seconds') and full:\n format_string = '{:0>2}'\n else:\n format_string = '{}'\n result.append(format_string.format(value))\n full = True\n\n return ('' if text else ':').join(result)\n\n\ndef get_formatted_message(message):\n \"\"\"Gets a log-friendly format of the given message.\"\"\"\n if message.edited_at:\n edited = ' (edited {})'.format(message.edited_at)\n else:\n edited = ''\n if message.attachments:\n urls = [attachment.url for attachment in message.attachments]\n attached = ' (attached {})'.format(urls)\n else:\n attached = ''\n return (\"{0.author.name}#{0.author.discriminator} ({0.author.id}) \"\n \"at {0.created_at}{1}{2}:\\r\\n\\t{0.content}\").format(\n message, edited, attached)\n\n\nasync def get_log_text(bot, channel, **log_arguments):\n \"\"\"Wrapper function for Carter's time machine.\"\"\"\n messages = []\n async for message in channel.history(**log_arguments):\n messages.append(message)\n return '\\r\\n\\r\\n'.join(get_formatted_message(message) 
for message in reversed(messages))\n\n\nasync def send_text_as_file(channel, text, filename, extra=None, extension='txt'):\n \"\"\"Sends the given text as a text file.\"\"\"\n discord_file = discord.File(\n get_text_as_file(text), filename='{}.{}'.format(filename, extension))\n reference = await channel.send(content=extra, file=discord_file)\n return reference\n\n\ndef get_text_as_file(text):\n \"\"\"Converts the text into a bytes object using BytesIO.\"\"\"\n try:\n return io.BytesIO(bytes(str(text), 'utf-8'))\n except Exception as e:\n raise CBException(\"Failed to convert text to a file.\", e=e)\n\n\ndef get_invoker(bot, guild=None, message=None):\n \"\"\"Gets a suitable command invoker for the bot.\n\n If a guild is specified, this will check for a custom invoker and\n whether or not mention mode is enabled.\n If a message is specified, this will obtain a guild as long as the message\n was not sent in a private channel.\n \"\"\"\n if message and isinstance(message.channel, discord.TextChannel):\n guild = message.guild\n if guild:\n guild_data = data.get(\n bot, 'core', None, guild_id=guild.id, default={})\n if guild_data.get('mention_mode', False):\n invoker = '{} '.format(guild.me.display_name)\n else:\n invoker = guild_data.get('command_invoker', None)\n else:\n invoker = None\n if invoker is None:\n invoker = bot.command_invokers[0]\n return invoker\n\n\nasync def notify_owners(bot, message, user_id=None):\n \"\"\"Sends all owners a direct message with the given text.\n\n If user_id is specified, this will check that the user is not in the\n blacklist.\n \"\"\"\n if bot.selfbot:\n logger.info(\"Owner notification:\\n{}\".format(message))\n else:\n if user_id:\n blacklist = data.get(bot, 'core', 'blacklist', default=[])\n if user_id in blacklist:\n await asyncio.sleep(0.5)\n return\n for owner in bot.owners:\n try:\n user = await bot.fetch_user(owner)\n if len(message) > 1990:\n await send_text_as_file(user, message, 'notification')\n else:\n await user.send(message)\n except Exception as e:\n logger.error(\"Failed to notify owner %s: %s\", owner, e)\n\n\ndef docker_send_command(command):\n \"\"\"Sends the database Docker container a command.\"\"\"\n logger.debug(\"Sending database container command: %s\", command)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(0.1)\n s.connect(('db', 2345))\n s.send(bytes(command, 'ascii'))\n s.close()\n\n\ndef docker_receive_exit_code():\n \"\"\"Waits until an exit code is returned from the database Docker container.\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(30)\n s.bind(('', 2345))\n s.listen(1)\n connection, _ = s.accept()\n response = connection.recv(64)\n connection.close()\n s.close()\n logger.debug(\"Database container response: %s\", response)\n return response\n\n\n# TODO: Add specific table dumping\ndef db_backup(bot, safe=True):\n \"\"\"Use the Docker setup to backup the database.\"\"\"\n if not bot.docker_mode:\n return\n try:\n logger.debug(\"Attemping to connect to the database container...\")\n if bot.dump_exclusions:\n exclusions = '-T \"' + '\" -T \"'.join(bot.dump_exclusions) + '\"'\n else:\n exclusions = ''\n command = (\n 'pg_dump -U postgres -F c {} postgres > '\n '/external/data/db_dump'.format(exclusions))\n docker_send_command(command)\n logger.debug(\"Told database container to backup\")\n except Exception as e:\n logger.warn(\"Failed to communicate with the database container: %s\", e)\n if safe:\n return\n raise CBException(\"Failed to communicate with the database 
container.\", e=e)\n\n # Read response code from database container\n try:\n return docker_receive_exit_code()\n except Exception as e:\n logger.warn(\"Failed to receive a response from the database container: %s\", e)\n if safe:\n return\n raise CBException(\"Failed to receive a response from the database container.\", e=e)\n\n\ndef make_backup(bot):\n \"\"\"Makes a backup of the data directory.\"\"\"\n logger.info(\"Making backup...\")\n db_backup(bot)\n backup_indices = '{0}/temp/backup{{}}.zip'.format(bot.path)\n if os.path.isfile(backup_indices.format(5)):\n os.remove(backup_indices.format(5))\n for it in range(1, 5):\n backup_file_from = backup_indices.format(5-it)\n backup_file_to = backup_indices.format(6-it)\n if os.path.isfile(backup_file_from):\n os.rename(backup_file_from, backup_file_to)\n shutil.make_archive(backup_indices.format(1)[:-4], 'zip', '{}/data'.format(bot.path))\n logger.info(\"Finished making backup.\")\n\n\ndef restore_backup(bot, backup_file):\n \"\"\"Restores a backup file given the backup filename.\"\"\"\n logger.info(\"Restoring from a backup file...\")\n try:\n core.bot_data = {'global_users': {}, 'global_plugins': {}}\n core.volatile_data = {'global_users': {}, 'global_plugins': {}}\n shutil.unpack_archive(backup_file, '{}/data'.format(bot.path))\n data.check_all(bot)\n data.load_data(bot)\n except Exception as e:\n raise CBException(\"Failed to extract backup.\", e=e)\n logger.info(\"Finished data restore.\")\n\n\ndef restore_db_backup(bot, tables=[]):\n \"\"\"Restores a database dump backup file.\n\n If tables is specified, this will restore those instead of the entire database.\n \"\"\"\n logger.info(\"Restoring database...\")\n try:\n if tables:\n specific_tables = '-t \"' + '\" -t \"'.join(tables) + '\"'\n else:\n specific_tables = ''\n command = 'pg_restore -U postgres -d postgres {} /external/temp/db_dump'.format(specific_tables)\n docker_send_command(command)\n return docker_receive_exit_code()\n except Exception as e:\n raise CBException(\"Failed to restore backup.\", e=e)\n logger.info(\"Finished database restore.\")\n\n\ndef get_timezone_offset(bot, guild_id=None, utc_dt=None, utc_seconds=None, as_string=False):\n \"\"\"Converts the time to a guild's (guessed) local time.\n \n Keyword arguments:\n guild_id -- Retrieves the configured timezone of the given guild, or\n guesses it based on the voice server region.\n utc_dt -- A timezone-naive datetime object that gets shifted by the offset.\n utc_seconds -- An integer value that gets shifted by the offset.\n as_string -- The UTC offset is returned as a UTC+X string instead of an integer value.\n\n If either utc_dt or utc_seconds are specified, the return type will be a tuple of two\n elements. 
The first element is the offset value, the second element is the\n shifted datetime object or seconds value.\n \"\"\"\n if guild_id is None:\n offset = 0\n else:\n offset = data.get(bot, 'core', 'timezone', guild_id=guild_id)\n if offset is None:\n guild = bot.get_guild(guild_id)\n offset = VOICE_REGIONS.get(str(guild.region), 0)\n if 'us-' in str(guild.region): # Apply DST offset\n if utc_dt and utc_dt.dst():\n in_dst = utc_dt.timetuple().tm_isdst > 0\n else:\n in_dst = time.localtime(time.time()).tm_isdst > 0\n if in_dst:\n offset += 1\n if as_string:\n result = 'UTC{}'.format(('+' + str(offset)) if offset >= 0 else offset)\n else:\n result = offset\n if utc_dt: # Convert UTC datetime object to \"local\" time\n return (result, utc_dt + datetime.timedelta(hours=offset))\n if utc_seconds is not None: # Convert UTC seconds to offset\n return (result, utc_seconds + (3600 * offset))\n else:\n return result\n\n\ndef get_schedule_entries(\n bot, plugin_name, search=None, destination=None, custom_match=None, custom_args=[]):\n \"\"\"Gets the entries given the search or match arguments.\"\"\"\n if custom_match:\n where_arg = custom_match\n input_args = custom_args\n else:\n where_arg = 'plugin = %s'\n input_args = [plugin_name]\n if search is not None:\n where_arg += ' AND search = %s'\n input_args.append(search)\n if destination is not None:\n where_arg += ' AND destination = %s'\n input_args.append(destination)\n\n cursor = data.db_select(\n bot, from_arg='schedule', where_arg=where_arg,\n additional='ORDER BY time ASC', input_args=input_args, safe=False)\n return cursor.fetchall()\n\n\ndef remove_schedule_entries(\n bot, plugin_name, search=None, destination=None, custom_match=None, custom_args=[]):\n \"\"\"Removes the entries given the search or match arguments.\"\"\"\n if custom_match:\n where_arg = custom_match\n input_args = custom_args\n else:\n where_arg = 'plugin = %s'\n input_args = [plugin_name]\n if search is not None:\n where_arg += ' AND search = %s'\n input_args.append(search)\n if destination is not None:\n where_arg += ' AND destination = %s'\n input_args.append(destination)\n return data.db_delete(bot, 'schedule', where_arg=where_arg, input_args=input_args)\n\n\ndef update_schedule_entries(\n bot, plugin_name, search=None, destination=None, function=None,\n payload=None, new_search=None, new_time=None, new_destination=None,\n info=None, custom_match=None, custom_args=[]):\n \"\"\"Updates the schedule entry with the given fields.\n\n If any field is left as None, it will not be changed.\n If custom_match is given, it must be a proper WHERE SQL clause. 
Otherwise\n it will look for a direct match with search.\n\n Returns the number of entries modified.\n \"\"\"\n if custom_match:\n where_arg = custom_match\n input_args = custom_args\n else:\n where_arg = 'plugin = %s'\n input_args = [plugin_name]\n if search is not None:\n where_arg += ' AND search = %s'\n input_args.append(search)\n if destination is not None:\n where_arg += ' AND destination = %s'\n input_args.append(destination)\n\n set_args = []\n set_input_args = []\n if function:\n set_args.append('function=%s')\n set_input_args.append(function.__name__)\n if payload:\n set_args.append('payload=%s')\n set_input_args.append(Json(payload))\n if new_time is not None:\n set_args.append('time=%s')\n set_input_args.append(int(new_time))\n if new_search is not None:\n set_args.append('search=%s')\n set_input_args.append(new_search)\n if new_destination:\n set_args.append('destination=%s')\n set_input_args.append(new_destination)\n if info is not None:\n set_args.append('info=%s')\n set_input_args.append(info)\n set_arg = ', '.join(set_args)\n input_args = set_input_args + input_args\n data.db_update(bot, 'schedule', set_arg=set_arg, where_arg=where_arg, input_args=input_args)\n asyncio.ensure_future(_start_scheduler(bot))\n\n\ndef schedule(\n bot, plugin_name, scheduled_time, function, payload=None,\n search=None, destination=None, info=None):\n \"\"\"Adds the entry to the schedule table and starts the timer.\n\n It should be noted that the function CANNOT be a lambda function. It must\n be a function residing in the top level of the plugin.\n Time should be a number in seconds from the epoch.\n\n The asynchronous function should take 6 arguments:\n bot -- An instance of the bot.\n scheduled_time -- Time at which the given function should be called.\n payload -- Same as the keyword argument.\n search -- Same as the keyword argument.\n destination -- Same as the keyword argument.\n late -- Whether or not the function was called late due to bot downtime.\n info -- Same as the keyword argument.\n id -- Unique ID assigned to the entry when it was created. Usually unused.\n\n Keyword arguments:\n payload -- Standard json-serializable dictionary\n search -- Used to assist in later deletion or modification\n destination -- Starts with either a 'c' or 'u', then the ID of the channel or user\n This is used to help determine what will need to be messaged.\n info -- Used as a description for the scheduled event if `!base notifications` is used.\n \"\"\"\n input_args = [\n int(scheduled_time),\n plugin_name,\n function.__name__,\n Json(payload),\n search,\n destination,\n info\n ]\n data.db_insert(bot, 'schedule', input_args=input_args, safe=False)\n asyncio.ensure_future(_start_scheduler(bot))\n\n\ndef get_messageable(bot, destination):\n \"\"\"Takes a destination in the schedule table format and returns a messageable.\"\"\"\n try:\n if destination[0] == 'u': # User\n get = bot.get_user\n elif destination[0] == 'c': # Channel\n get = bot.get_channel\n else:\n raise CBException(\"Must be either a user `u` or channel `c`.\")\n return get(int(destination[1:]))\n except Exception as e:\n raise CBException(\"Invalid destination format.\", e=e)\n\n\nasync def _schedule_timer(bot, entry, delay):\n task_comparison = bot.schedule_timer\n await asyncio.sleep(0.5)\n logger.debug(\"Scheduler sleeping for %s seconds...\", delay)\n await asyncio.sleep(delay)\n if task_comparison is not bot.schedule_timer:\n logger.debug(\"_schedule_timer was not cancelled! 
Cancelling this scheduler...\")\n return\n if int(time.time() + 1) < entry.time:\n logger.warn(\"_schedule_timer was about to delete the entry early! Restarting loop...\")\n asyncio.ensure_future(_start_scheduler(bot))\n return\n deleted = None  # initialized so the check below cannot raise NameError if db_delete fails\n try:\n deleted = data.db_delete(\n bot, 'schedule', where_arg='id=%s', input_args=[entry.id], safe=False)\n except Exception as e:\n logger.warn(\"_schedule_timer failed to delete a schedule entry. %s\", e)\n if deleted:\n try:\n logger.debug(\"_schedule_timer done sleeping for %s seconds!\", delay)\n function = getattr(bot.plugins[entry.plugin], entry.function)\n late = delay < -60\n asyncio.ensure_future(function(\n bot, entry.time, entry.payload, entry.search,\n entry.destination, late, entry.info, entry.id))\n except Exception as e:\n logger.warn(\"Failed to execute scheduled function: %s\", e)\n asyncio.ensure_future(_start_scheduler(bot))\n\n\nasync def _start_scheduler(bot):\n \"\"\"Starts the internal scheduler.\"\"\"\n await bot.wait_until_ready()\n if bot.schedule_timer: # Scheduler already running\n bot.schedule_timer.cancel()\n bot.schedule_timer = None\n cursor = data.db_select(\n bot, from_arg='schedule', additional='ORDER BY time ASC', limit=1, safe=False)\n result = cursor.fetchone()\n if result:\n delta = result.time - time.time()\n logger.debug(\"Starting scheduled event %s\", result.id)\n bot.schedule_timer = asyncio.ensure_future(_schedule_timer(bot, result, delta))\n else:\n logger.debug(\"No pending scheduled event available.\")\n","repo_name":"jkchen2/JshBot","sub_path":"jshbot/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":39071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10451758672","text":"# 1644 sum of consecutive primes (BOJ)\n\nimport sys\nimport math\ninput = sys.stdin.readline\n\ndef isPrime(target):\n if target <=1:\n return False\n if target % 2 == 0:\n return True if target == 2 else False\n for i in range(3,int(math.sqrt(target))+1,2):\n if target % i == 0:\n return False\n return True\n\n\ndef sol():\n N = int(input())\n if N == 1:\n print(0)\n return\n\n answer = 0\n # collect every prime up to N (this also covers the case where N itself is prime)\n prime_arr = []\n for target in range(N+1):\n if isPrime(target):\n prime_arr.append(target)\n\n # left index, right index, running sum\n left,right= 0, 0\n acc_sum = prime_arr[left]\n\n # right is advanced before it is read, so prime_arr[right] can go out of range; guard for it.\n while left <= right:\n if acc_sum < N:\n right += 1\n if right < len(prime_arr):\n acc_sum += prime_arr[right]\n else:\n break\n elif acc_sum > N :\n acc_sum -= prime_arr[left]\n left += 1\n else:\n answer += 1\n acc_sum -= prime_arr[left]\n left += 1\n\n\n print(answer)\nsol()\n
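\n# Hedged worked example (editor addition, not part of the original solution):\n# for N = 41 the window scan finds 2+3+5+7+11+13, 11+13+17 and 41 itself,\n# so the program prints 3.\n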
","repo_name":"inkyu0103/BOJ","sub_path":"two pointer/1644.py","file_name":"1644.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3798393291","text":"new_list = [5, \"lane\", \"chips\", \"incense\", 70, 96, \"helm\", 45, 32, \"large\", 'small']\r\n# for items in new_list:\r\n# if type(items) == int:\r\n# if items >= 6:\r\n# print(items)\r\n# else:\r\n# print(\"none\")\r\n\r\n# OR\r\nfor items in new_list:\r\n if str(items).isnumeric() and items > 6:\r\n print(items)\r\n","repo_name":"ap4ashutosh/python-codes-101","sub_path":"Exercise3.py","file_name":"Exercise3.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6226182939","text":"from sqlalchemy.orm import sessionmaker\n\nfrom models import get_engine, QaInfo, Triples\n\n\ndef get_session():\n engine = get_engine()\n db_session = sessionmaker(bind=engine)\n return db_session()\n\n\ndef add_info(qa):\n session = get_session()\n session.add(qa)\n session.commit()\n print(\"add info succeed\")\n session.close()\n\n\ndef add_triples(triples):\n session = get_session()\n session.add(triples)\n session.commit()\n print(\"add triples succeed\")\n session.close()\n\n\ndef add_other_query(oq):\n session = get_session()\n session.add(oq)\n session.commit()\n print(\"add oq succeed\")\n session.close()\n\n\ndef query_qa_all():\n session = get_session()\n qa_info = session.query(QaInfo).filter(QaInfo.adopt > 0).all()\n session.close()\n return qa_info\n\n\ndef query_qa_one(entity=None, relation=None):\n session = get_session()\n qa_info = session.query(QaInfo).filter(QaInfo.head == entity, QaInfo.relation == relation,\n QaInfo.adopt > 0).order_by(QaInfo.start_time.desc()).first()\n session.close()\n if qa_info:\n return qa_info.answer\n else:\n return ''\n\n\ndef query_qa_maxid():\n session = get_session()\n maxid = session.query(QaInfo).order_by(QaInfo.ID.desc()).first()\n session.close()\n return maxid.ID\n\n\ndef query_qa_id(id):\n session = get_session()\n qa_info = session.query(QaInfo).filter_by(ID=id).first()\n session.close()\n return qa_info\n\n\ndef query_triples_id(id):\n session = get_session()\n triples = session.query(Triples).filter(Triples.info_id == id).all()\n session.close()\n return triples\n\n\ndef query_qa_label(label):\n session = get_session()\n qa_info = session.query(QaInfo).filter(QaInfo.label == label).order_by(QaInfo.start_time.desc()).all()\n session.close()\n return qa_info\n\n\ndef query_qa_keyword(keyword):\n session = get_session()\n qa_info = session.query(QaInfo).filter(QaInfo.query.like('%{0}%'.format(keyword))).all()\n session.close()\n return qa_info\n\n\ndef query_qa_list():\n session = get_session()\n qa_info = session.query(QaInfo).filter(QaInfo.adopt > 0).limit(6).all()\n session.close()\n return qa_info\n\n\ndef query_qa_re_counte(label=None):\n session = get_session()\n if label != None:\n count = session.query(QaInfo).filter(QaInfo.label == label, QaInfo.adopt > 0).count()\n else:\n count = session.query(QaInfo).filter(QaInfo.adopt > 0).count()\n session.close()\n return count\n\n\ndef print_all(infos):\n answer = [info.answer for info in infos]\n return answer\n\n\ndef update_user(id):\n session = get_session()\n session.query(QaInfo).filter(QaInfo.ID == id).update({\"adopt\": 1})\n session.commit()\n session.close()\n\n\ndef delete_user(id):\n session = get_session()\n # fixed: the column is QaInfo.ID (uppercase), as used everywhere else in this module\n session.query(QaInfo).filter(QaInfo.ID == id).delete()\n session.commit()\n session.close()\n\n\nif __name__ == '__main__':\n # init_db()\n\n # add_user(lisi)\n # answer = query_qa_one()\n # print(answer)\n\n # new = QaInfo('2022-05-14', '小柴胡汤能够治疗感冒吗?', '小柴胡汤', 'PRE', '主治', '可以', 0)\n # add_info(new)\n # triples = Triples(2, '', '', '', '', '', '')\n # add_triples(triples)\n\n # fqa_list = query_qa_list()\n # for fqa in fqa_list:\n # print(\"{0},{1},{2}\".format(fqa.id, fqa.head, fqa.label))\n\n # qa_detail = query_qa_re_counte('主治')\n # for qa in qa_detail:\n # print(qa.id, qa.query)\n\n # qa_list = query_qa_label('MED')\n # for qa in qa_list:\n # print(qa.id, qa.query)\n\n # qa_list = query_qa_keyword('蜂蜜')\n # for qa in qa_list:\n # print(qa.id, qa.query)\n\n # maxid = query_qa_maxid()\n # print(maxid)\n\n # triples = query_triples_id(36)\n # result = {}\n # head = set()\n # s = []\n #\n 
# for t in triples:\n # result = {}\n # result['head'] = str(t.head)\n # result['head_label'] = str(t.head_label)\n # head.add(result['head'])\n # s.append(result)\n #\n # s = json.dumps(s, ensure_ascii=False)\n # hr = json.dumps(list(head), ensure_ascii=False)\n #\n # print(triples)\n # print(result)\n # print(s)\n # print(head)\n # print(hr)\n\n count = query_qa_re_counte()\n print(count)\n","repo_name":"wangweichang0820/TCM_KBQA","sub_path":"dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10756605532","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 21:22:16 2020\n\n@author: Vijit Kanjilal\n\"\"\"\n\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n#plt.switch_backend('agg')\nfrom scipy import interpolate\n\n\n\nt=[0,7,17,33,66,97]\nt1=[0.0,1.0,3.0,6.0,12.0,18.0]\n\ndx=1.0\nbinning=100\n\nkB = 1.3807e-16 #Boltzman's Constant in CGS\nmp= 1.67e-24\nUNIT_DENSITY= mp\n\nX, Y, Z = 0.7154, 0.2703, 0.0143\nmu = 1/(2*X+0.75*Y+0.5625*Z)\nmue = 2./(1+X)\nmui = 1/(1/mu-1/mue)\ngamma = 5./3\n\n\n\nx1 = np.zeros(binning, dtype=np.float64)\ny1 = np.zeros(binning, dtype=np.float64)\n \nx2 = np.zeros(binning, dtype=np.float64)\ny2= np.zeros(binning, dtype=np.float64)\n \nx3 = np.zeros(binning, dtype=np.float64)\ny3= np.zeros(binning, dtype=np.float64)\n \nx4 = np.zeros(binning, dtype=np.float64)\ny4= np.zeros(binning, dtype=np.float64)\n \nx5 = np.zeros(binning, dtype=np.float64)\ny5= np.zeros(binning, dtype=np.float64)\n\nx6 = np.zeros(binning, dtype=np.float64)\ny6= np.zeros(binning, dtype=np.float64)\n\nsigma1, sigma2, sigma3, sigma4, sigma5, sigma6 = None, None, None, None, None, None \n\n\ncooling = np.loadtxt('cooltable.dat') #solar metallicity\nLAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1], fill_value=\"extrapolate\")\n\nTmax=-1e20\nTmin=1e20\n\nfor i in range(6):\n f = h5py.File('paper_data_new/data.%04d.dbl.h5'%i,'r') \n T = np.log10(np.array(f['Timestep_%d/vars/T'%t[i]]).flatten())\n Tmax=np.maximum(Tmax,np.max(T))\n Tmin=np.minimum(Tmin,np.min(T))\n #just for the user to know which file is currently in use\n print(i)\n \nprint(Tmin)\nprint(Tmax)\n\nTmax=Tmax+0.01\n\nfor i in range(6):\n f = h5py.File('paper_data_new/data.%04d.dbl.h5'%i,'r') \n rho = (np.array(f['Timestep_%d/vars/rho'%t[i]])).flatten()\n T = np.log10(np.array(f['Timestep_%d/vars/T'%t[i]])).flatten()\n print(i)\n \n #sorting\n T, rho = (list(t) for t in zip(*sorted(zip(T, rho))))\n rho = np.array(rho)\n T = np.array(T)\n #sorting done\n \n\n bins=np.linspace(Tmin,Tmax,binning+1)\n \n cr = np.zeros(binning, dtype=np.float64)\n \n bins_i=bins[0]\n bins_o=bins[1]\n\n j=0\n \n for k in range(len(T)):\n ticket=0\n ne = rho[k]*UNIT_DENSITY/(mue*mp)\n ni = rho[k]*UNIT_DENSITY/(mui*mp)\n \n \n while(ticket==0):\n if (bins_i<=T[k] None:\n if not link.is_wheel:\n return\n wheel = Wheel(link.filename)\n if wheel.supported(self._finder.target_python.get_tags()):\n return\n msg = f\"{link.filename} is not a supported wheel on this platform.\"\n raise UnsupportedWheel(msg)\n def _make_extras_candidate(self, base, extras):\n cache_key = (id(base), extras)\n try:\n candidate = self._extras_candidate_cache[cache_key]\n except KeyError:\n candidate = ExtrasCandidate(base, extras)\n self._extras_candidate_cache[cache_key] = candidate\n return candidate\n def _make_candidate_from_dist(\n self,\n dist,\n extras,\n template,\n ):\n try:\n base = 
self._installed_candidate_cache[dist.key]\n except KeyError:\n base = AlreadyInstalledCandidate(dist, template, factory=self)\n self._installed_candidate_cache[dist.key] = base\n if not extras:\n return base\n return self._make_extras_candidate(base, extras)\n def _make_candidate_from_link(\n self,\n link,\n extras,\n template,\n name,\n version,\n ):\n if link in self._build_failures:\n return None\n if template.editable:\n if link not in self._editable_candidate_cache:\n try:\n self._editable_candidate_cache[link] = EditableCandidate(\n link,\n template,\n factory=self,\n name=name,\n version=version,\n )\n except (InstallationSubprocessError, MetadataInconsistent) as e:\n logger.warning(\"Discarding %s. %s\", link, e)\n self._build_failures[link] = e\n return None\n base = self._editable_candidate_cache[link]\n else:\n if link not in self._link_candidate_cache:\n try:\n self._link_candidate_cache[link] = LinkCandidate(\n link,\n template,\n factory=self,\n name=name,\n version=version,\n )\n except (InstallationSubprocessError, MetadataInconsistent) as e:\n logger.warning(\"Discarding %s. %s\", link, e)\n self._build_failures[link] = e\n return None\n base = self._link_candidate_cache[link]\n if not extras:\n return base\n return self._make_extras_candidate(base, extras)\n def _iter_found_candidates(\n self,\n ireqs: Sequence[InstallRequirement],\n specifier: SpecifierSet,\n hashes: Hashes,\n prefers_installed: bool,\n incompatible_ids: Set[int],\n ) -> Iterable[Candidate]:\n if not ireqs:\n return ()\n template = ireqs[0]\n assert template.req, \"Candidates found on index must be PEP 508\"\n name = canonicalize_name(template.req.name)\n extras = frozenset()\n for ireq in ireqs:\n assert ireq.req, \"Candidates found on index must be PEP 508\"\n specifier &= ireq.req.specifier\n hashes &= ireq.hashes(trust_internet=False)\n extras |= frozenset(ireq.extras)\n def _get_installed_candidate() -> Optional[Candidate]:\n if self._force_reinstall:\n return None\n try:\n installed_dist = self._installed_dists[name]\n except KeyError:\n return None\n if not specifier.contains(installed_dist.version, prereleases=True):\n return None\n candidate = self._make_candidate_from_dist(\n dist=installed_dist,\n extras=extras,\n template=template,\n )\n if id(candidate) in incompatible_ids:\n return None\n return candidate\n def iter_index_candidate_infos():\n result = self._finder.find_best_candidate(\n project_name=name,\n specifier=specifier,\n hashes=hashes,\n )\n icans = list(result.iter_applicable())\n all_yanked = all(ican.link.is_yanked for ican in icans)\n for ican in reversed(icans):\n if not all_yanked and ican.link.is_yanked:\n continue\n func = functools.partial(\n self._make_candidate_from_link,\n link=ican.link,\n extras=extras,\n template=template,\n name=name,\n version=ican.version,\n )\n yield ican.version, func\n return FoundCandidates(\n iter_index_candidate_infos,\n _get_installed_candidate(),\n prefers_installed,\n incompatible_ids,\n )\n def _iter_explicit_candidates_from_base(\n self,\n base_requirements: Iterable[Requirement],\n extras: FrozenSet[str],\n ) -> Iterator[Candidate]:\n for req in base_requirements:\n lookup_cand, _ = req.get_candidate_lookup()\n if lookup_cand is None:\n continue\n base_cand = as_base_candidate(lookup_cand)\n assert base_cand is not None, \"no extras here\"\n yield self._make_extras_candidate(base_cand, extras)\n def _iter_candidates_from_constraints(\n self,\n identifier: str,\n constraint: Constraint,\n template: InstallRequirement,\n ) -> 
Iterator[Candidate]:\n for link in constraint.links:\n self._fail_if_link_is_unsupported_wheel(link)\n candidate = self._make_candidate_from_link(\n link,\n extras=frozenset(),\n template=install_req_from_link_and_ireq(link, template),\n name=canonicalize_name(identifier),\n version=None,\n )\n if candidate:\n yield candidate\n def find_candidates(\n self,\n identifier: str,\n requirements: Mapping[str, Iterator[Requirement]],\n incompatibilities: Mapping[str, Iterator[Candidate]],\n constraint: Constraint,\n prefers_installed: bool,\n ) -> Iterable[Candidate]:\n explicit_candidates = set()\n ireqs = []\n for req in requirements[identifier]:\n cand, ireq = req.get_candidate_lookup()\n if cand is not None:\n explicit_candidates.add(cand)\n if ireq is not None:\n ireqs.append(ireq)\n with contextlib.suppress(InvalidRequirement):\n parsed_requirement = PackagingRequirement(identifier)\n explicit_candidates.update(\n self._iter_explicit_candidates_from_base(\n requirements.get(parsed_requirement.name, ()),\n frozenset(parsed_requirement.extras),\n ),\n )\n if ireqs:\n try:\n explicit_candidates.update(\n self._iter_candidates_from_constraints(\n identifier,\n constraint,\n template=ireqs[0],\n ),\n )\n except UnsupportedWheel:\n return ()\n incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}\n if not explicit_candidates:\n return self._iter_found_candidates(\n ireqs,\n constraint.specifier,\n constraint.hashes,\n prefers_installed,\n incompat_ids,\n )\n return (\n c\n for c in explicit_candidates\n if id(c) not in incompat_ids\n and constraint.is_satisfied_by(c)\n and all(req.is_satisfied_by(c) for req in requirements[identifier])\n )\n def make_requirement_from_install_req(self, ireq, requested_extras):\n if not ireq.match_markers(requested_extras):\n logger.info(\n \"Ignoring %s: markers '%s' don't match your environment\",\n ireq.name,\n ireq.markers,\n )\n return None\n if not ireq.link:\n return SpecifierRequirement(ireq)\n self._fail_if_link_is_unsupported_wheel(ireq.link)\n cand = self._make_candidate_from_link(\n ireq.link,\n extras=frozenset(ireq.extras),\n template=ireq,\n name=canonicalize_name(ireq.name) if ireq.name else None,\n version=None,\n )\n if cand is None:\n if not ireq.name:\n raise self._build_failures[ireq.link]\n return UnsatisfiableRequirement(canonicalize_name(ireq.name))\n return self.make_requirement_from_candidate(cand)\n def make_requirement_from_candidate(self, candidate):\n return ExplicitRequirement(candidate)\n def make_requirement_from_spec(\n self,\n specifier,\n comes_from,\n requested_extras=(),\n ):\n ireq = self._make_install_req_from_spec(specifier, comes_from)\n return self.make_requirement_from_install_req(ireq, requested_extras)\n def make_requires_python_requirement(self, specifier):\n if self._ignore_requires_python or specifier is None:\n return None\n return RequiresPythonRequirement(specifier, self._python_candidate)\n def get_wheel_cache_entry(self, link, name):\n if self._wheel_cache is None or self.preparer.require_hashes:\n return None\n return self._wheel_cache.get_cache_entry(\n link=link,\n package_name=name,\n supported_tags=get_supported(),\n )\n def get_dist_to_uninstall(self, candidate):\n dist = self._installed_dists.get(candidate.project_name)\n if dist is None:\n return None\n if not self._use_user_site:\n return dist\n if dist_in_usersite(dist):\n return dist\n if running_under_virtualenv() and dist_in_site_packages(dist):\n raise InstallationError(\n \"Will not install to the user site because it will \"\n 
\"lack sys.path precedence to {} in {}\".format(\n dist.project_name,\n dist.location,\n )\n )\n return None\n def _report_requires_python_error(self, causes):\n assert causes, \"Requires-Python error reported with no cause\"\n version = self._python_candidate.version\n if len(causes) == 1:\n specifier = str(causes[0].requirement.specifier)\n message = (\n f\"Package {causes[0].parent.name!r} requires a different \"\n f\"Python: {version} not in {specifier!r}\"\n )\n return UnsupportedPythonVersion(message)\n message = f\"Packages require a different Python. {version} not in:\"\n for cause in causes:\n package = cause.parent.format_for_error()\n specifier = str(cause.requirement.specifier)\n message += f\"\\n{specifier!r} (required by {package})\"\n return UnsupportedPythonVersion(message)\n def _report_single_requirement_conflict(self, req, parent):\n if parent is None:\n req_disp = str(req)\n else:\n req_disp = f\"{req} (from {parent.name})\"\n cands = self._finder.find_all_candidates(req.project_name)\n versions = [str(v) for v in sorted({c.version for c in cands})]\n logger.critical(\n \"Could not find a version that satisfies the requirement %s \"\n \"(from versions: %s)\",\n req_disp,\n \", \".join(versions) or \"none\",\n )\n return DistributionNotFound(f\"No matching distribution found for {req}\")\n def get_installation_error(\n self,\n e,\n constraints,\n ):\n assert e.causes, \"Installation error reported with no cause\"\n requires_python_causes = [\n cause\n for cause in e.causes\n if isinstance(cause.requirement, RequiresPythonRequirement)\n and not cause.requirement.is_satisfied_by(self._python_candidate)\n ]\n if requires_python_causes:\n return self._report_requires_python_error(\n cast(\"Sequence[ConflictCause]\", requires_python_causes),\n )\n if len(e.causes) == 1:\n req, parent = e.causes[0]\n if req.name not in constraints:\n return self._report_single_requirement_conflict(req, parent)\n def text_join(parts):\n if len(parts) == 1:\n return parts[0]\n return \", \".join(parts[:-1]) + \" and \" + parts[-1]\n def describe_trigger(parent):\n ireq = parent.get_install_requirement()\n if not ireq or not ireq.comes_from:\n return f\"{parent.name}=={parent.version}\"\n if isinstance(ireq.comes_from, InstallRequirement):\n return str(ireq.comes_from.name)\n return str(ireq.comes_from)\n triggers = set()\n for req, parent in e.causes:\n if parent is None:\n trigger = req.format_for_error()\n else:\n trigger = describe_trigger(parent)\n triggers.add(trigger)\n if triggers:\n info = text_join(sorted(triggers))\n else:\n info = \"the requested packages\"\n msg = (\n \"Cannot install {} because these package versions \"\n \"have conflicting dependencies.\".format(info)\n )\n logger.critical(msg)\n msg = \"\\nThe conflict is caused by:\"\n relevant_constraints = set()\n for req, parent in e.causes:\n if req.name in constraints:\n relevant_constraints.add(req.name)\n msg = msg + \"\\n \"\n if parent:\n msg = msg + f\"{parent.name} {parent.version} depends on \"\n else:\n msg = msg + \"The user requested \"\n msg = msg + req.format_for_error()\n for key in relevant_constraints:\n spec = constraints[key].specifier\n msg += f\"\\n The user requested (constraint) {key}{spec}\"\n msg = (\n msg\n + \"\\n\\n\"\n + \"To fix this you could try to:\\n\"\n + \"1. loosen the range of package versions you've specified\\n\"\n + \"2. 
remove package versions to allow pip attempt to solve \"\n + \"the dependency conflict\\n\"\n )\n logger.info(msg)\n return DistributionNotFound(\n \"ResolutionImpossible: for help visit \"\n \"https://pip.pypa.io/en/latest/user_guide/\"\n )\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_1/378.py.transformed.py.transformed.py","file_name":"378.py.transformed.py.transformed.py","file_ext":"py","file_size_in_byte":18542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28745445220","text":"import os\nos.chdir('/home/mark/projects/Nonlinear-Programming')\nimport NonlinearProgramming as nlpg\nfrom NonlinearProgramming import functions as fun_nlpg\nfrom numpy import triu\n\nnp = fun_nlpg.np\n\n\ndef to_sp_hess(hess):\n hess_triu = triu(H)\n nnzs = hess_triu.nonzero()\n num_idx = nnzs[0].shape\n return [num_idx[0],nnzs[0],nnzs[1],H[nnzs]]\n\n# test that the sparse function works\n# did a test using other code\n\nx_cute = np.ones(7)\ncute = fun_nlpg.Func_Object(nlpg.functions.make_cute_func(x_cute),\n x_cute)\n\n\nsp_H=cute.sparse_hessian(x_cute)\nH=cute.hessian(x_cute)\n\n##\n# Need an array testing scheme right here\n#\n#\n","repo_name":"markstoehr/Nonlinear-Programming","sub_path":"test_sparse_mult.py","file_name":"test_sparse_mult.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"37807264448","text":"from collections import defaultdict\n\ndata = []\nwhile True:\n try:\n data.append(input())\n except:\n break\n\ncnt = defaultdict(int)\nfor line in data:\n line = line.replace(\"\\t\", \" \")\n for token in line.split(\" \"):\n cnt[token] += 1\n \nres = list([(-v, k) for k, v in cnt.items()])\nres = sorted(res)\nfor (v, k) in res:\n print('{} {}'.format(-v, k))","repo_name":"amoshyc/ccu-data-engineering","sub_path":"hw2/course/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42619940668","text":"import datetime\nimport operator\nimport os\nimport re\nimport sys\nimport traceback\nfrom threading import Thread, Timer\nfrom subprocess import *\nimport time\n\nimport wmiirc\nfrom wmiirc import *\n\nbackground = '#333333'\nfloatbackground='#222222'\n\nwmii['font'] = 'drift,-*-fixed-medium-r-*-*-12-*-*-*-*-*-*-*'\n\n# Colors tuples: \" \"\nwmii['focuscolors'] = '#ffffff', '#555555', '#777777'\nwmii['normcolors'] = '#bbbbbb', '#222222', '#000000'\nwmii['alertcolors'] = '#ff0000', '#222222', '#777777'\nwmii['border'] = 1\n\nkeys.bind('main', (\n \"Tag actions\",\n ('%(mod)s-slash', \"Change to another tag\",\n lambda k: tags.select(tag_menu())),\n ('%(mod)s-Shift-slash', \"Retag the selected client\",\n lambda k: setattr(Client('sel'), 'tags', tag_menu())),\n ('%(mod)s-space', \"Open program menu\",\n lambda k: program_menu()),\n ('%(mod)s-p', \"Move to the view to the right\",\n lambda k: tags.select(tags.next(True))),\n ('%(mod)s-Shift-p', \"Move to the view to the right, take along current client\",\n lambda k: tags.select(tags.next(True), take_client=Client('sel'))),\n ('%(mod)s-b', \"Toggle between floating and managed layers\",\n lambda k: Tag('sel').select('toggle')),\n ('%(mod)s-Shift-b', \"Toggle selected client between floating and managed layers\",\n lambda k: Tag('sel').send(Client('sel'), 'toggle')),\n))\n\n@defmonitor\ndef time(self):\n return 
wmii.cache['focuscolors'], datetime.datetime.now().strftime('%a %b %d %I:%M:%S %Y')\n\ndef get_battery_status(percent):\n equal = '=' * (int(percent) / 10)\n minus = '-' * (10 - int(percent) / 10)\n return '[%s%s] %d.0' % (equal, minus, percent)\n\n@defmonitor\ndef battery(self):\n power_dir = '/sys/class/power_supply/BAT0/'\n\n if os.path.exists(power_dir):\n charge_now_filename = os.path.join(power_dir, 'charge_now')\n charge_now = open(charge_now_filename).readline().strip()\n\n charge_full_filename = os.path.join(power_dir, 'charge_full')\n charge_full = open(charge_full_filename).readline().strip()\n\n percent = min((float(charge_now) / float(charge_full) * 100), 100.0)\n\n battery_status = get_battery_status(percent)\n\n if percent < 10:\n return wmii.cache['alertcolors'], battery_status\n else:\n return wmii.cache['focuscolors'], battery_status\n else:\n return wmii.cache['focuscolors'], get_battery_status(100)\n","repo_name":"nonameentername/dotfiles","sub_path":"wmii-hg/.wmii-hg/wmiirc_local.py","file_name":"wmiirc_local.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"38508170756","text":"from collections import namedtuple\nfrom django.urls import reverse\n\n\n# read data from database and save them in dict format\ndef dictfetchall(cursor):\n \"Return all rows from a cursor as a dict\"\n columns = [col[0] for col in cursor.description]\n return [dict(zip(columns, row)) for row in cursor.fetchall()]\n\n\n# read data from database and save them in obj format\n# def namedtuplefetchall(cursor):\n# \"Return all rows from a cursor as a namedtuple\"\n# desc = cursor.description\n# nt_result = namedtuple(\"Result\", [col[0] for col in desc])\n# return [nt_result(*row) for row in cursor.fetchall()]\n\n\n# test if the user is authed\ndef auth_test(source, url):\n response = source.client.get(reverse(url))\n source.assertEquals(response.status_code, 302)\n\n # login\n source.client.post(\n reverse(\"users:login\"),\n data={\"email\": source.email_login, \"password\": source.password},\n )\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[-1].strip()\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n return ip\n","repo_name":"gcivil-nyu-org/S2022-Team-4-repo","sub_path":"home_fix/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"41798759166","text":"from dataclasses import dataclass, field\nfrom typing import Any, List, Optional, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom srl.base.define import EnvObservationTypes, RLTypes\nfrom srl.base.rl.base import RLParameter, RLTrainer\nfrom srl.base.rl.config import RLConfig\nfrom srl.base.rl.processor import Processor\nfrom srl.base.rl.registration import register\nfrom srl.base.rl.worker_rl import RLWorker\nfrom srl.base.rl.worker_run import WorkerRun\nfrom srl.rl.functions.common import render_discrete_action\nfrom srl.rl.functions.common_tf import compute_kl_divergence, compute_kl_divergence_normal, compute_logprob\nfrom srl.rl.memories.experience_replay_buffer import ExperienceReplayBuffer, ExperienceReplayBufferConfig\nfrom srl.rl.models.image_block import ImageBlockConfig\nfrom srl.rl.models.mlp_block import MLPBlockConfig\nfrom srl.rl.models.tf.input_block 
import InputBlock\nfrom srl.rl.processors.image_processor import ImageProcessor\n\nkl = keras.layers\n\n\"\"\"\nPaper\nhttps://arxiv.org/abs/1707.06347\nhttps://arxiv.org/abs/2005.12729\n\nClipped Surrogate Objective : o\nAdaptive KL Penalty : o\nGAE : o\nOther\n Value Clipping : o\n Reward scaling : o\n Orthogonal initialization and layer scaling: x\n Adam learning rate annealing : o\n Reward Clipping : o\n Observation Normalization : o\n Observation Clipping : o\n Hyperbolic tan activations : x\n Global Gradient Clipping : o\n\"\"\"\n\n\n# ------------------------------------------------------\n# config\n# ------------------------------------------------------\n@dataclass\nclass Config(RLConfig, ExperienceReplayBufferConfig):\n # model\n image_block: ImageBlockConfig = field(init=False, default_factory=lambda: ImageBlockConfig())\n hidden_block: MLPBlockConfig = field(init=False, default_factory=lambda: MLPBlockConfig())\n value_block: MLPBlockConfig = field(init=False, default_factory=lambda: MLPBlockConfig())\n policy_block: MLPBlockConfig = field(init=False, default_factory=lambda: MLPBlockConfig())\n\n experience_collection_method: str = \"MC\" # \"\" or \"MC\" or \"GAE\"\n gae_discount: float = 0.9 # GAEの割引率\n\n baseline_type: str = \"ave\" # \"\" or \"ave\" or \"std\" or \"normal\" or \"advantage\"\n surrogate_type: str = \"clip\" # \"\" or \"clip\" or \"kl\"\n clip_range: float = 0.2 # 状態価値のクリップ範囲\n adaptive_kl_target: float = 0.01 # Adaptive KLペナルティ内の定数\n\n batch_size: int = 32\n memory_warmup_size: int = 1000\n discount: float = 0.9 # 割引率\n optimizer_initial_lr: float = 0.02 # 初期学習率\n optimizer_final_lr: float = 0.01 # 終了学習率\n optimizer_lr_step: float = 200 * 10 # 終了学習率になるまでの更新回数\n value_loss_weight: float = 1.0 # 状態価値の反映率\n entropy_weight: float = 0.1 # エントロピーの反映率\n\n enable_state_normalized: bool = True # 状態の正規化\n enable_value_clip: float = True # 価値関数もclipするか\n global_gradient_clip_norm: float = 0.5 # 勾配のL2におけるclip値(0で無効)\n\n enable_action_normalization: bool = True # アクションの正規化\n state_clip: Optional[Tuple[float, float]] = None # 状態のclip(Noneで無効、(-10,10)で指定)\n reward_clip: Optional[Tuple[float, float]] = None # 報酬のclip(Noneで無効、(-10,10)で指定)\n\n enable_stable_gradients: bool = True # 勾配爆発の対策\n \"\"\" 勾配爆発の対策, 平均、分散、ランダムアクションで大きい値を出さないようにclipする \"\"\"\n stable_gradients_max_stddev: float = 2\n\n def __post_init__(self):\n super().__post_init__()\n\n self.memory.capacity = 2000\n self.hidden_block.set_mlp((64, 64))\n self.value_block.set_mlp((64,))\n self.policy_block.set_mlp((64,))\n\n @property\n def base_action_type(self) -> RLTypes:\n return RLTypes.ANY\n\n @property\n def base_observation_type(self) -> RLTypes:\n return RLTypes.CONTINUOUS\n\n def get_use_framework(self) -> str:\n return \"tensorflow\"\n\n def set_processor(self) -> List[Processor]:\n return [\n ImageProcessor(\n image_type=EnvObservationTypes.GRAY_2ch,\n resize=(84, 84),\n enable_norm=True,\n )\n ]\n\n def getName(self) -> str:\n return \"PPO\"\n\n def assert_params(self) -> None:\n super().assert_params()\n assert self.memory_warmup_size <= self.memory.capacity\n assert self.batch_size < self.memory_warmup_size\n\n @property\n def info_types(self) -> dict:\n return {\n \"policy_loss\": {},\n \"value_loss\": {},\n \"entropy_loss\": {},\n \"lr\": {\"data\": \"last\"},\n }\n\n\nregister(\n Config(),\n __name__ + \":RemoteMemory\",\n __name__ + \":Parameter\",\n __name__ + \":Trainer\",\n __name__ + \":Worker\",\n)\n\n\n# ------------------------------------------------------\n# RemoteMemory\n# 
------------------------------------------------------\nclass RemoteMemory(ExperienceReplayBuffer):\n pass\n\n\n# ------------------------------------------------------\n# network\n# ------------------------------------------------------\nclass _ActorCriticNetwork(keras.Model):\n def __init__(self, config: Config):\n super().__init__()\n self.config = config\n\n if config.action_type == RLTypes.CONTINUOUS and config.enable_stable_gradients:\n if config.enable_action_normalization:\n self.mean_low = -1\n self.mean_high = 1\n self.action_range = 2\n else:\n # mean の範囲はactionの取りうる範囲\n self.mean_low = config.action_low\n self.mean_high = config.action_high\n self.action_range = config.action_high - config.action_low\n\n # Orthogonal initialization and layer scaling\n kernel_initializer = \"orthogonal\"\n\n # input\n self.in_block = InputBlock(config.observation_shape, config.env_observation_type)\n\n # image\n if self.in_block.use_image_layer:\n self.image_block = config.image_block.create_block_tf(enable_time_distributed_layer=False)\n self.image_flatten = kl.Flatten()\n\n # --- hidden block\n self.hidden_block = config.hidden_block.create_block_tf()\n\n # --- value\n self.value_block = config.value_block.create_block_tf()\n self.value_layer = kl.Dense(1, kernel_initializer=kernel_initializer)\n\n # --- policy\n self.policy_block = config.policy_block.create_block_tf()\n if self.config.action_type == RLTypes.DISCRETE:\n self.out_layer = kl.Dense(config.action_num, activation=\"softmax\", kernel_initializer=kernel_initializer)\n elif self.config.action_type == RLTypes.CONTINUOUS:\n self.pi_mean_layer = kl.Dense(\n config.action_num,\n activation=\"linear\",\n kernel_initializer=kernel_initializer,\n bias_initializer=\"truncated_normal\",\n )\n self.pi_stddev_layer = kl.Dense(\n config.action_num,\n activation=\"linear\",\n kernel_initializer=kernel_initializer,\n bias_initializer=\"truncated_normal\",\n )\n else:\n raise ValueError(self.action_type)\n\n # build\n self.build((None,) + config.observation_shape)\n\n def call(self, state, training=False):\n x = self.in_block(state, training=training)\n if self.in_block.use_image_layer:\n x = self.image_block(x, training=training)\n x = self.image_flatten(x)\n x = self.hidden_block(x, training=training)\n\n v = self.value_block(x)\n v = self.value_layer(v)\n\n p = self.policy_block(x)\n if self.config.action_type == RLTypes.DISCRETE:\n prob = self.out_layer(p)\n return v, prob\n elif self.config.action_type == RLTypes.CONTINUOUS:\n mean = self.pi_mean_layer(p)\n stddev = self.pi_stddev_layer(p)\n\n # σ > 0\n stddev = tf.exp(stddev)\n\n if self.config.enable_stable_gradients:\n mean = tf.clip_by_value(mean, self.mean_low, self.mean_high)\n stddev = tf.clip_by_value(stddev, 0, self.config.stable_gradients_max_stddev)\n\n return v, mean, stddev\n\n def policy(self, state):\n if self.config.action_type == RLTypes.DISCRETE:\n v, probs = self.call(state.reshape(1, -1))\n prob = probs.numpy()[0]\n action = np.random.choice(self.config.action_num, 1, p=prob)[0]\n return v.numpy()[0], prob, action\n elif self.config.action_type == RLTypes.CONTINUOUS:\n v, mean, stddev = self(state.reshape((1, -1)))\n\n # ガウス分布に従った乱数をだす\n action = tf.random.normal(mean.shape, mean=mean, stddev=stddev)\n\n if self.config.enable_stable_gradients:\n action = tf.clip_by_value(\n action,\n mean - self.action_range,\n mean + self.action_range,\n )\n\n return (\n v.numpy()[0],\n mean.numpy()[0],\n stddev.numpy()[0],\n action.numpy()[0],\n )\n\n def build(self, input_shape):\n 
self.__input_shape = input_shape\n super().build(self.__input_shape)\n\n def summary(self, name: str = \"\", **kwargs):\n if hasattr(self.in_block, \"init_model_graph\"):\n self.in_block.init_model_graph()\n if self.in_block.use_image_layer and hasattr(self.image_block, \"init_model_graph\"):\n self.image_block.init_model_graph()\n if hasattr(self.hidden_block, \"init_model_graph\"):\n self.hidden_block.init_model_graph()\n\n x = kl.Input(shape=self.__input_shape[1:])\n name = self.__class__.__name__ if name == \"\" else name\n model = keras.Model(inputs=x, outputs=self.call(x), name=name)\n model.summary(**kwargs)\n\n\n# ------------------------------------------------------\n# Parameter\n# ------------------------------------------------------\nclass Parameter(RLParameter):\n def __init__(self, *args):\n super().__init__(*args)\n self.config: Config = self.config\n\n self.model = _ActorCriticNetwork(self.config)\n\n # Adaptive KL penalty\n self.adaptive_kl_beta = 0.5\n\n def call_restore(self, data: Any, **kwargs) -> None:\n self.model.set_weights(data[0])\n self.adaptive_kl_beta = data[1]\n\n def call_backup(self, **kwargs) -> Any:\n return [\n self.model.get_weights(),\n self.adaptive_kl_beta,\n ]\n\n def summary(self, **kwargs):\n self.model.summary(**kwargs)\n\n\n# ------------------------------------------------------\n# Trainer\n# ------------------------------------------------------\nclass Trainer(RLTrainer):\n def __init__(self, *args):\n super().__init__(*args)\n self.config: Config = self.config\n self.parameter: Parameter = self.parameter\n self.remote_memory: RemoteMemory = self.remote_memory\n\n self.train_count = 0\n\n self.optimizer = keras.optimizers.Adam(learning_rate=self.config.optimizer_initial_lr)\n\n def get_train_count(self):\n return self.train_count\n\n def train(self):\n if self.remote_memory.length() < self.config.memory_warmup_size:\n return {}\n batchs = self.remote_memory.sample(self.config.batch_size)\n\n states = np.asarray([e[\"state\"] for e in batchs])\n advantage = np.asarray([e[\"discounted_reward\"] for e in batchs]).reshape((-1, 1))\n old_v = np.asarray([e[\"v\"] for e in batchs])\n\n # --- 状態の正規化\n if self.config.enable_state_normalized:\n states = (states - np.mean(states, axis=0, keepdims=True)) / (np.std(states, axis=0, keepdims=True) + 1e-8)\n\n # --- baseline\n if self.config.baseline_type == \"\" or self.config.baseline_type == \"none\":\n pass\n elif self.config.baseline_type == \"ave\":\n advantage -= np.mean(advantage)\n elif self.config.baseline_type == \"std\":\n advantage = advantage / (np.std(advantage) + 1e-8)\n elif self.config.baseline_type == \"normal\":\n advantage = (advantage - np.mean(advantage)) / (np.std(advantage) + 1e-8)\n elif self.config.baseline_type == \"advantage\":\n pass\n else:\n raise ValueError(\"baseline_type fail. 
['none', 'ave', 'std', 'normal', 'advantage]\")\n\n if self.config.action_type == RLTypes.DISCRETE:\n actions = np.asarray([e[\"action\"] for e in batchs])\n old_probs = np.asarray([e[\"prob\"] for e in batchs])\n\n # アクションをonehotベクトルの形に変形\n onehot_actions = tf.one_hot(actions, self.config.action_num).numpy()\n\n # old_pi\n old_pi = tf.reduce_sum(onehot_actions * old_probs, axis=1, keepdims=True)\n old_logpi = np.log(old_pi)\n else:\n actions = np.asarray([e[\"action\"] for e in batchs])\n old_logpi = np.asarray([e[\"logpi\"] for e in batchs])\n if self.config.surrogate_type == \"kl\":\n old_mean = np.asarray([e[\"mean\"] for e in batchs])\n old_stddev = np.asarray([e[\"stddev\"] for e in batchs])\n\n # --- Qモデルの学習\n with tf.GradientTape() as tape:\n if self.config.action_type == RLTypes.DISCRETE:\n v, new_probs = self.parameter.model(states, training=True)\n\n # π(a|s)とlog(π(a|s))を計算\n new_pi = tf.reduce_sum(onehot_actions * new_probs, axis=1, keepdims=True)\n new_logpi = tf.math.log(tf.clip_by_value(new_pi, 1e-8, 1.0))\n\n else:\n v, new_mean, new_stddev = self.parameter.model(states, training=True)\n new_logpi = compute_logprob(new_mean, new_stddev, actions)\n new_pi = tf.exp(new_logpi)\n\n # (new_pi / old_pi) で計算するとnanになりやすい\n ratio = tf.exp(new_logpi - old_logpi)\n\n # advantage\n if self.config.baseline_type == \"advantage\":\n advantage = advantage - tf.stop_gradient(v)\n\n if self.config.surrogate_type == \"clip\":\n # Clipped Surrogate Objective\n ratio_clipped = tf.clip_by_value(ratio, 1 - self.config.clip_range, 1 + self.config.clip_range)\n\n # loss の計算\n loss_unclipped = ratio * advantage\n loss_clipped = ratio_clipped * advantage\n\n # 小さいほうを採用\n policy_loss = tf.minimum(loss_unclipped, loss_clipped)\n\n elif self.config.surrogate_type == \"kl\":\n if self.config.action_type == RLTypes.DISCRETE:\n kl = compute_kl_divergence(old_probs, new_probs)\n else:\n kl = compute_kl_divergence_normal(old_mean, old_stddev, new_mean, new_stddev)\n policy_loss = ratio * advantage - self.parameter.adaptive_kl_beta * kl\n elif self.config.surrogate_type == \"\":\n policy_loss = ratio * advantage\n else:\n raise ValueError(self.config.surrogate_type)\n\n # --- Value loss\n if self.config.enable_value_clip:\n # clipする場合\n v_clipped = tf.clip_by_value(v, old_v - self.config.clip_range, old_v + self.config.clip_range)\n value_loss = tf.maximum((v - advantage) ** 2, (v_clipped - advantage) ** 2)\n else:\n # clipしない場合\n value_loss = (v - advantage) ** 2\n\n # --- 方策エントロピー\n entropy_loss = tf.reduce_sum(new_pi * new_logpi, axis=1, keepdims=True)\n\n # --- total loss\n policy_loss = -policy_loss\n value_loss = self.config.value_loss_weight * value_loss\n entropy_loss = -self.config.entropy_weight * entropy_loss\n\n loss = tf.reduce_mean(policy_loss + value_loss + entropy_loss) # ミニバッチ\n loss += tf.reduce_sum(self.parameter.model.losses) # 正則化項\n\n grads = tape.gradient(loss, self.parameter.model.trainable_variables)\n if self.config.global_gradient_clip_norm != 0:\n grads, _ = tf.clip_by_global_norm(grads, self.config.global_gradient_clip_norm)\n self.optimizer.apply_gradients(zip(grads, self.parameter.model.trainable_variables))\n\n info = {\n \"policy_loss\": np.mean(policy_loss.numpy()),\n \"value_loss\": np.mean(value_loss.numpy()),\n \"entropy_loss\": np.mean(entropy_loss.numpy()),\n }\n\n # KLペナルティβの調整\n if self.config.surrogate_type == \"kl\":\n kl_mean = tf.reduce_mean(kl).numpy()\n if kl_mean < self.config.adaptive_kl_target / 1.5:\n self.parameter.adaptive_kl_beta /= 2\n elif kl_mean > 
self.config.adaptive_kl_target * 1.5:\n self.parameter.adaptive_kl_beta *= 2\n\n info[\"kl_mean\"] = kl_mean\n info[\"kl_beta\"] = self.parameter.adaptive_kl_beta\n # nanになる場合は adaptive_kl_target が小さすぎる可能性あり\n\n # 学習率を減少\n if self.train_count > self.config.optimizer_lr_step:\n lr = self.config.optimizer_final_lr\n else:\n lr = self.config.optimizer_initial_lr - (\n self.config.optimizer_initial_lr - self.config.optimizer_final_lr\n ) * (self.train_count / self.config.optimizer_lr_step)\n self.optimizer.lr = lr\n info[\"lr\"] = lr\n\n self.train_count += 1\n return info\n\n\n# ------------------------------------------------------\n# Worker\n# ------------------------------------------------------\nclass Worker(RLWorker):\n def __init__(self, *args):\n super().__init__(*args)\n self.config: Config = self.config\n self.parameter: Parameter = self.parameter\n self.remote_memory: RemoteMemory = self.remote_memory\n\n if self.config.enable_action_normalization:\n self.action_center = (self.config.action_space.high + self.config.action_space.low) / 2\n self.action_scale = self.config.action_space.high - self.action_center\n\n def call_on_reset(self, worker: WorkerRun) -> dict:\n self.recent_batch = []\n self.recent_rewards = []\n self.recent_next_states = []\n return {}\n\n def call_policy(self, worker: WorkerRun) -> Tuple[Any, dict]:\n state = worker.state\n if self.config.state_clip is not None:\n state = np.clip(state, self.config.state_clip[0], self.config.state_clip[1])\n\n if self.config.action_type == RLTypes.DISCRETE:\n v, prob, action = self.parameter.model.policy(state)\n action = int(action)\n self.batch = {\n \"state\": state,\n \"action\": action,\n \"prob\": prob,\n \"v\": v,\n }\n else:\n v, mean, stddev, action = self.parameter.model.policy(state)\n logpi = compute_logprob(mean.reshape((-1, 1)), stddev.reshape((-1, 1)), action.reshape((-1, 1)))\n self.batch = {\n \"state\": state,\n \"action\": action,\n \"v\": v,\n \"logpi\": logpi[0],\n }\n\n if self.config.enable_action_normalization:\n action = action * self.action_scale + self.action_center\n\n # safety action\n action = np.clip(action, self.config.action_low, self.config.action_high)\n action = action.tolist()\n\n if self.config.surrogate_type == \"kl\":\n self.batch[\"mean\"] = mean\n self.batch[\"stddev\"] = stddev\n if self.rendering:\n self.batch[\"mean\"] = mean\n self.batch[\"stddev\"] = stddev\n self.batch[\"env_action\"] = action\n\n return action, {}\n\n def call_on_step(self, worker: WorkerRun) -> dict:\n if not self.training:\n return {}\n\n reward = worker.reward\n\n # 報酬のclip\n if self.config.reward_clip is not None:\n if reward < self.config.reward_clip[0]:\n reward = self.config.reward_clip[0]\n elif reward > self.config.reward_clip[1]:\n reward = self.config.reward_clip[1]\n\n if self.config.experience_collection_method == \"GAE\":\n next_state = worker.state\n if self.config.state_clip is not None:\n next_state = np.clip(next_state, self.config.state_clip[0], self.config.state_clip[1])\n self.recent_next_states.append(next_state)\n\n self.recent_rewards.append(reward)\n self.recent_batch.append(self.batch)\n\n if worker.done:\n if self.config.experience_collection_method == \"MC\":\n mc_r = 0\n for i in reversed(range(len(self.recent_batch))):\n r = self.recent_rewards[i]\n mc_r = r + self.config.discount * mc_r\n\n batch = self.recent_batch[i]\n batch[\"discounted_reward\"] = mc_r\n self.remote_memory.add(batch)\n\n elif self.config.experience_collection_method == \"GAE\":\n if self.config.action_type == 
RLTypes.DISCRETE:\n v, _ = self.parameter.model(np.asarray([e[\"state\"] for e in self.recent_batch]))\n n_v, _ = self.parameter.model(np.asarray(self.recent_next_states))\n else:\n v, _, _ = self.parameter.model(np.asarray([e[\"state\"] for e in self.recent_batch]))\n n_v, _, _ = self.parameter.model(np.asarray(self.recent_next_states))\n v = v.numpy().reshape((-1,))\n n_v = n_v.numpy().reshape((-1,))\n gae = 0\n for i in reversed(range(len(self.recent_batch))):\n batch = self.recent_batch[i]\n\n if i == len(self.recent_batch) - 1:\n delta = self.recent_rewards[i] - v[i]\n else:\n delta = self.recent_rewards[i] + self.config.discount * n_v[i] - v[i]\n gae = delta + self.config.discount * self.config.gae_discount * gae\n batch[\"discounted_reward\"] = gae\n self.remote_memory.add(batch)\n\n else:\n raise ValueError(self.config.experience_collection_method)\n\n return {}\n\n def render_terminal(self, worker, **kwargs) -> None:\n v = self.batch[\"v\"]\n print(f\"V: {v[0]:.5f}\")\n\n if self.config.action_type == RLTypes.DISCRETE:\n prob = self.batch[\"prob\"]\n maxa = np.argmax(prob)\n\n def _render_sub(a: int) -> str:\n s = \"{:8.3f}%\".format(prob[a])\n return s\n\n render_discrete_action(maxa, worker.env, self.config, _render_sub)\n else:\n action = self.batch[\"action\"]\n env_action = self.batch[\"env_action\"]\n pi = np.exp(self.batch[\"logpi\"])\n mean = self.batch[\"mean\"]\n stddev = self.batch[\"stddev\"]\n\n print(f\"mean : {mean}\")\n print(f\"stddev : {stddev}\")\n print(f\"action : {action}\")\n print(f\"env_action: {env_action}\")\n print(f\"pi : {pi}\")\n","repo_name":"pocokhc/simple_distributed_rl","sub_path":"srl/algorithms/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":23713,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"22691433613","text":"import functools\n\ndef chuanHoa(s):\n arr = s.strip().split()\n res = \"\"\n for i in arr:\n res += i + \" \"\n return res.strip()\n\nclass KhachHang:\n def __init__ (self, maKhachHang, tenKhachHang, loaiHoGD, chiSoDau, chiSoCuoi):\n self.maKhachHang = \"KH\" + str.format(\"%02d\" % maKhachHang)\n self.tenKhachHang = chuanHoa(tenKhachHang).title()\n self.loaiHoGD = loaiHoGD\n self.chiSoDau = chiSoDau\n self.chiSoCuoi = chiSoCuoi\n \n def dinhMuc(self):\n if self.loaiHoGD == \"A\":\n return 100\n if self.loaiHoGD == \"B\":\n return 500\n return 200\n \n def tienTrongDinhMuc(self):\n res = self.chiSoCuoi - self.chiSoDau\n if res < self.dinhMuc():\n return res * 450\n return self.dinhMuc() * 450\n \n def tienVuotDinhMuc(self):\n res = self.chiSoCuoi - self.chiSoDau\n if res > self.dinhMuc():\n return (res - self.dinhMuc()) * 1000\n return 0\n \n def thueVAT(self):\n return self.tienVuotDinhMuc() // 20\n \n def tienPhaiNop(self):\n return self.tienTrongDinhMuc() + self.tienVuotDinhMuc() + self.thueVAT()\n \n def inRa(self):\n print(self.maKhachHang, self.tenKhachHang, self.tienTrongDinhMuc(), self.tienVuotDinhMuc(), self.thueVAT(), self.tienPhaiNop())\n \ndef cmp(a,b):\n if a.tienPhaiNop() > b.tienPhaiNop():\n return -1\n return 1\n\nn = int(input())\nlistKH = []\nfor i in range(n):\n s1 = input().strip()\n s2 = input().strip().split()\n x = KhachHang(i + 1, s1, s2[0], int(s2[1]), int(s2[2]))\n listKH.append(x)\n \nlistKH = sorted(listKH, key = functools.cmp_to_key(cmp))\nfor i in listKH:\n i.inRa()\n \n# 2\n# nGuyEn Hong Ngat\n# C 200 278\n# Chu thi minh\n# A 120 160\n\n","repo_name":"nguyenvantu11052002/Python-PTIT","sub_path":"PYKT095 - TÍNH TIỀN 
ĐIỆN.py","file_name":"PYKT095 - TÍNH TIỀN ĐIỆN.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20284063841","text":"import bpy\ndef set_motion(object_name,points):\n # points is of type : {frame: (x,y,z)}\n obj=bpy.context.scene.objects[object_name]\n for frame in points:\n obj.location=points[frame][0]\n obj.rotation_euler=points[frame][1]\n obj.keyframe_insert(data_path=\"location\",frame=frame)\n obj.keyframe_insert(\"rotation_euler\", frame = frame)\n\n","repo_name":"mjoshi07/Underwater-Simulator-Oyster-Detection","sub_path":"code/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"35806267505","text":"#!/usr/bin/env python3\n\n# https://gist.github.com/ukBaz/0294521185af72c53f75903fbfb0adb5\n\nimport pydbus\nfrom pydbus import SystemBus\nfrom gi.repository import GLib\nimport sys\nimport time\n\nDEVICE_ADDR = 'D4:DA:FE:8C:7C:EF'\nUUID_BASE = '-0000-1000-8000-00805f9b34fb'\n\nUUID_DEVICE_NAME = '2A29'\nUUID_DEVICE_MODEL = '2A24'\nUUID_FIRMWARE = '2A26'\nUUID_DATA = '2A39'\nUUID_STATUS = 'A001'\nUUID_DATA_RATE = 'A002'\nUUID_SENSORS = 'A003'\nUUID_ANGLE_OFFSET = 'A007'\nUUID_ECOMPAS_CALIB = 'A008'\nUUID_WIND_CORREC = 'A009'\n\n# DBus object paths\nBLUEZ_SERVICE = 'org.bluez'\nADAPTER_PATH = '/org/bluez/hci0'\ndevice_path = f\"{ADAPTER_PATH}/dev_{DEVICE_ADDR.replace(':', '_')}\"\n\n\ndef get_uuid(register):\n return '0000' + register + UUID_BASE\n\ndef get_characteristic_path(dev_path, uuid):\n \"\"\"Look up DBus path for characteristic UUID\"\"\"\n mng_objs = mngr.GetManagedObjects()\n for path in mng_objs:\n chr_uuid = mng_objs[path].get('org.bluez.GattCharacteristic1', {}).get('UUID')\n if path.startswith(dev_path) and chr_uuid == uuid.casefold():\n return path\n\ndef get_data(bus, path):\n calypso = bus.get(BLUEZ_SERVICE, path)\n data = calypso.ReadValue({})\n return data\n\ndef convert_device_name(raw_data):\n print(\"DEVICE_NAME = \", ''.join(chr(i) for i in raw_data))\n\ndef convert_device_model(raw_data):\n print(\"DEVICE_MODE = \", ''.join(chr(i) for i in raw_data))\n\ndef convert_firmawre(raw_data):\n print(\"DEVICE_FIRMWARE = \", ''.join(chr(i) for i in raw_data))\n\ndef convert_measures(raw_data):\n velocity = ((raw_data[1]<<4) + raw_data[0])/100\n wind_direction = ((raw_data[3]<<4) + raw_data[2])\n battery_level = raw_data[4]*10\n temp_level = raw_data[5]-100\n roll = raw_data[6]-90\n pitch = raw_data[7]-90\n ecompass = 360-((raw_data[8]<<4) + raw_data[0])\n\n print(time.time(), velocity, wind_direction, battery_level, temp_level, roll, pitch, ecompass)\n\ndef convert_status(raw_data):\n if(raw_data==0x00):\n print(\"Sleep Mode, Only Advertising\")\n if(raw_data==0x01):\n print(\"Low Power Mode, 1hz and Sensors disabled\")\n if(raw_data==0x02):\n print(\"Normal Mode, All data rate and sensors availables\")\n\ndef convert_measures_rate(raw_data):\n print(\"MEASURE_RATE = \", str(int(raw_data[0])) + \"Hz\")\n\ndef convert_sensor_clinometer(raw_data):\n print(\"ENABLE_CLINOMETER = \", \"ON\" if raw_data==0x01 else \"OFF\")\n\ndef convert_angle_offset(raw_data):\n print(\"ANGLE_OFFSET = \",raw_data[1]<<4 + raw_data[0])\n\ndef convert_ecompass_mode(raw_data):\n print(\"MODE = \", \"Calibration\" if raw_data==0x01 else \"Normal\")\n\ndef convert_wind_corr(raw_data):\n val = 0\n for i in range(4):\n val += (raw_data[i]<<((3-i)*4))\n 
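\n# Hedged worked example (editor addition): with raw_data = [0x01, 0x02, 0x03, 0x04]\n# val = (1<<12) + (2<<8) + (3<<4) + 4 = 4660, so this prints WIND_CORR = 145.625.\n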
print(\"WIND_CORR = \", val/32.0)\n\ndef get_device_name(bus):\n raw_data = get_data(bus, calypso_DEVICE_NAME_path)\n convert_device_name(raw_data)\n\ndef get_device_model(bus):\n raw_data = get_data(bus, calypso_DEVICE_MODEL_path)\n convert_device_model(raw_data)\n\ndef get_firmawre(bus):\n raw_data = get_data(bus, calypso_FIRMWARE_path)\n convert_firmawre(raw_data)\n\ndef get_measures(bus):\n raw_data = get_data(bus, calypso_MEASURES_path)\n convert_measures(raw_data)\n\ndef get_status(bus):\n raw_data = get_data(bus, calypso_STATUS_path)\n convert_status(raw_data)\n\ndef get_measures_rate(bus):\n raw_data = get_data(bus, calypso_MEASURES_RATE_path)\n convert_measures_rate(raw_data)\n\ndef get_sensor_clinometer(bus):\n raw_data = get_data(bus, calypso_SENSORS_path)\n convert_sensor_clinometer(raw_data)\n\ndef get_angle_offset(bus):\n raw_data = get_data(bus, calypso_ANGLE_OFFSET_path)\n convert_angle_offset(raw_data)\n\ndef get_ecompass_mode(bus):\n raw_data = get_data(bus, calypso_ECOMPAS_CALIB_path)\n convert_ecompass_mode(raw_data)\n\ndef get_wind_corr(bus):\n raw_data = get_data(bus, calypso_WIND_CORREC_path)\n convert_wind_corr(raw_data)\n\n\n# Enable eventloop for notifications\ndef measure_handler(iface, prop_changed, prop_removed):\n \"\"\"Notify event handler for button press\"\"\"\n if 'Value' in prop_changed:\n new_value = prop_changed['Value']\n #print(f\"Button A state: {new_value}\")\n convert_measures(new_value)\n\n###############################################\n\n# setup dbus\nbus = pydbus.SystemBus()\nmngr = bus.get(BLUEZ_SERVICE, '/')\nadapter = bus.get(BLUEZ_SERVICE, ADAPTER_PATH) \ndevice = bus.get(BLUEZ_SERVICE, device_path)\n\ndevice.Connect()\n\nwhile not device.ServicesResolved:\n sleep(0.5)\n\n\n# Characteristic DBus information\ncalypso_DEVICE_NAME_path = get_characteristic_path(device._path, get_uuid(UUID_DEVICE_NAME))\ncalypso_DEVICE_MODEL_path = get_characteristic_path(device._path, get_uuid(UUID_DEVICE_MODEL))\ncalypso_FIRMWARE_path = get_characteristic_path(device._path, get_uuid(UUID_FIRMWARE))\ncalypso_MEASURES_path = get_characteristic_path(device._path, get_uuid(UUID_DATA))\ncalypso_STATUS_path = get_characteristic_path(device._path, get_uuid(UUID_STATUS))\ncalypso_MEASURES_RATE_path = get_characteristic_path(device._path, get_uuid(UUID_DATA_RATE))\ncalypso_SENSORS_path = get_characteristic_path(device._path, get_uuid(UUID_SENSORS))\ncalypso_ANGLE_OFFSET_path = get_characteristic_path(device._path, get_uuid(UUID_ANGLE_OFFSET))\ncalypso_ECOMPAS_CALIB_path = get_characteristic_path(device._path, get_uuid(UUID_ECOMPAS_CALIB))\ncalypso_WIND_CORREC_path = get_characteristic_path(device._path, get_uuid(UUID_WIND_CORREC))\n\nget_device_name(bus)\nget_device_model(bus)\nget_firmawre(bus)\nget_measures(bus)\nget_status(bus)\nget_measures_rate(bus)\nget_sensor_clinometer(bus)\nget_angle_offset(bus)\nget_ecompass_mode(bus)\nget_wind_corr(bus)\n \n\n# Handler\nmainloop = GLib.MainLoop()\ncalypso = bus.get(BLUEZ_SERVICE, calypso_MEASURES_path)\ncalypso.onPropertiesChanged = measure_handler\ncalypso.StartNotify()\n\nmainloop.run()\n \nmainloop.quit()\ncalypso.StopNotify()\ndevice.Disconnect()\n","repo_name":"ThomasLeMezo/vaavud_wind","sub_path":"calypso.py","file_name":"calypso.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9366978519","text":"import helper\n\nprioritySum = 0\ngroupSize = 3\ngroupItems = [set() for _ in range(groupSize)]\nlineCounter = 
1\n\nfor line in helper.get_input(3):\n    groupIndex = lineCounter % groupSize\n    groupItems[groupIndex] = set(line.replace(\"\\n\", \"\"))\n    \n    if lineCounter % groupSize == 0:\n        intersectingItems = groupItems[0]\n        for i in range(1, groupSize):\n            intersectingItems = intersectingItems.intersection(groupItems[i])\n        itemType = intersectingItems.pop()\n        asciiOrd = ord(itemType)\n        priority = asciiOrd - 38\n        if asciiOrd >= 97:\n            priority = asciiOrd - 96\n        \n        prioritySum += priority\n        \n    lineCounter+=1\n\nprint(\"sum of the priorities {}\".format(prioritySum))","repo_name":"KohlhaseJ/adventofcode2022","sub_path":"src/day-3-2--rucksack.py","file_name":"day-3-2--rucksack.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16932388316","text":"\r\nx = y = c = 0\r\ns = input(\"Enter the numbers in hexadecimal notation: \")\r\na = s.split()\r\nfor i in range(len(a)):\r\n    a[i] = int(a[i], base=16)\r\nx = a[0]\r\ny = a[1]\r\nc = x ^ y\r\nc = hex(c)\r\nprint(c)\r\n\r\n\r\n","repo_name":"sasha39612/Algoritm_on_Pyhont_MFTI","sub_path":"Exercise_9_A_MFTI.py","file_name":"Exercise_9_A_MFTI.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"541763622","text":"from util.dirichlet import preferDistribution, exchangeDistribution\nfrom options import args_parser\nfrom utils import get_datasets\nfrom FL_MultiTask.models.Update import LocalUpdate\nfrom FL_MultiTask.models.Nets import MLP, CNNMnist, cnn1\nfrom FL_MultiTask.models.test import test_img\nimport copy\nimport numpy as np\n\ndef fl_training(idxs_users, dict_users, p, dataset_train, dataset_test, args, task):\n    print('Training on the {} dataset'.format(task))\n    print('Clients participating in training: {}'.format(idxs_users))\n    img_size = dataset_train[0][0].shape\n\n    # build model\n    if args.model == 'cnn' and task == 'cifar10':\n        net_glob = cnn1(num_classes=10).to(args.device)\n        # net_glob = CNNCifar(args=args).to(args.device)\n    elif args.model == 'cnn' and task in ('mnist', 'fmnist'):\n        net_glob = CNNMnist(args=args).to(args.device)\n    elif args.model == 'mlp':\n        len_in = 1\n        for x in img_size:\n            len_in *= x\n        net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)\n    else:\n        exit('Error: unrecognized model')\n    net_glob.train()\n    # training\n    loss_train = []\n    acc_all = []\n    for iter in range(args.epochs):\n        loss_locals = []\n        w_locals = []\n        # local training\n        for idx in idxs_users:\n            # print('Client {} is training'.format(idx))\n            local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n            # local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])\n            w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))\n            print('Client {} finished training, loss: {}'.format(idx,loss))\n            w_locals.append(copy.deepcopy(w))\n            loss_locals.append(copy.deepcopy(loss))\n        w_glob = FedAvg(w_locals, p, idxs_users)\n        # copy weight to net_glob\n        net_glob.load_state_dict(w_glob)\n        # print loss\n        loss_avg = sum(loss_locals) / len(loss_locals)\n        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))\n        loss_train.append(loss_avg)\n\n        net_glob.eval()\n        acc_test, loss_test = test_img(net_glob, dataset_test, args)\n        print(\"Testing accuracy: {:.2f}\".format(acc_test))\n        acc_all.append(acc_test)\n\n    # net_glob.eval()\n    # acc_test, loss_test = test_img(net_glob, dataset_test, args)\n    # print(\"Testing accuracy: {:.2f}\".format(acc_test))\n    return acc_test, 
acc_all\n\ndef FedAvg(w, p, idxs_users):\n    # print('idxs_users={}'.format(idxs_users))\n    w_avg = copy.deepcopy(w[0])\n    for k in w_avg.keys():\n        w_avg[k] = w_avg[k] * p[idxs_users[0]]\n        # print('when the key is {}'.format(k))\n        for i in range(1, len(w)):\n            # print('computing the parameters of client {}'.format(idxs_users[i]))\n            w_avg[k] += w[i][k] * p[idxs_users[i]]\n        # w_avg[k] = torch.div(w_avg[k], len(w))\n    return w_avg\n\ndef getP(pic_distribution_every_client):\n    P = {}\n    index_client = 0\n    # sum_data is the total training-set size over all clients, e.g. mnist = 60000\n    sum_data = np.sum(pic_distribution_every_client).sum()\n    # print(\"sum_data={}\".format(sum_data))\n    for distribution in pic_distribution_every_client:\n        P[index_client] = np.sum(distribution) / sum_data\n        index_client += 1\n    return P\nargs = args_parser()\ntrain_dataset_fmnist, test_dataset_fmnist = get_datasets('fmnist')\ndict_users_fmnist, distribution_fmnist,_ = preferDistribution(train_dataset_fmnist, args)\ndict_users_fmnist, distribution_fmnist, class_weight_fmnist = exchangeDistribution(dict_users_fmnist,distribution_fmnist, 'fmnist')\np = getP(distribution_fmnist)\nusers = [16, 17, 18, 19, 20, 21, 4, 2, 44, 36, 47, 1]\n# users = [21, 32, 34, 12, 26]\nfor u in users:\n    print('{}:{}'.format(u, distribution_fmnist[u]))\nfor u in users:\n    print('{}:{}'.format(u, p[u]))\nacc_test, acc_all = fl_training(users, dict_users_fmnist, p, train_dataset_fmnist, test_dataset_fmnist, args, 'fmnist')\nprint(acc_all)\n\n\n\n\n","repo_name":"lgx3/FL_MultiTask","sub_path":"main/testFMNIST.py","file_name":"testFMNIST.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28962348078","text":"\nimport numpy as np\nimport tysoc_bindings\nimport pytysoc\nimport time\n\nNUM_BOXES = 5\nNUM_SPHERES = 5\nNUM_CYLINDERS = 5\nNUM_CAPSULES = 5\nNUM_MESHES = 0\n\ndef createFloor() :\n    _collisionData = tysoc_bindings.PyCollisionData()\n    _collisionData.type = tysoc_bindings.eShapeType.BOX\n    _collisionData.size = [ 10., 10., 1. ]\n\n    _visualData = tysoc_bindings.PyVisualData()\n    _visualData.type = tysoc_bindings.eShapeType.BOX\n    _visualData.size = [ 10., 10., 1. ]\n    _visualData.ambient = [ 0.3, 0.5, 0.7 ]\n    _visualData.diffuse = [ 0.3, 0.5, 0.7 ]\n    _visualData.specular = [ 0.3, 0.5, 0.7 ]\n    _visualData.shininess = 50.0\n\n    _bodyData = tysoc_bindings.PyBodyData()\n    _bodyData.dyntype = tysoc_bindings.eDynamicsType.STATIC\n    _bodyData.addCollision( _collisionData )\n    _bodyData.addVisual( _visualData )\n\n    _floor = tysoc_bindings.PyBody( \"floor\", _bodyData, [ 0., 0., -0.5 ], [ 0., 0., 0. 
] )\n\n return _floor\n\ndef createSingleBody( name, shape ) :\n _collisionData = tysoc_bindings.PyCollisionData()\n _visualData = tysoc_bindings.PyVisualData()\n _bodyData = tysoc_bindings.PyBodyData()\n\n if shape == 'box' :\n _collisionData.type = tysoc_bindings.eShapeType.BOX\n _collisionData.size = [ 0.2, 0.2, 0.2 ]\n\n _visualData.type = tysoc_bindings.eShapeType.BOX\n _visualData.size = [ 0.2, 0.2, 0.2 ]\n\n elif shape == 'sphere' :\n _collisionData.type = tysoc_bindings.eShapeType.SPHERE\n _collisionData.size = [ 0.1, 0.1, 0.1 ]\n\n _visualData.type = tysoc_bindings.eShapeType.SPHERE\n _visualData.size = [ 0.1, 0.1, 0.1 ]\n\n elif shape == 'cylinder' :\n _collisionData.type = tysoc_bindings.eShapeType.CYLINDER\n _collisionData.size = [ 0.1, 0.2, 0.1 ]\n\n _visualData.type = tysoc_bindings.eShapeType.CYLINDER\n _visualData.size = [ 0.1, 0.2, 0.1 ]\n\n elif shape == 'capsule' :\n _collisionData.type = tysoc_bindings.eShapeType.CAPSULE\n _collisionData.size = [ 0.1, 0.2, 0.1 ]\n\n _visualData.type = tysoc_bindings.eShapeType.CAPSULE\n _visualData.size = [ 0.1, 0.2, 0.1 ]\n\n elif shape == 'mesh' :\n _collisionData.type = tysoc_bindings.eShapeType.MESH\n _collisionData.size = [ 0.2, 0.2, 0.2 ]\n _collisionData.filename = pytysoc.PATHS.RESOURCES_DIR + \"meshes/monkey.stl\"\n\n _visualData.type = tysoc_bindings.eShapeType.MESH\n _visualData.size = [ 0.2, 0.2, 0.2 ]\n _visualData.filename = pytysoc.PATHS.RESOURCES_DIR + \"meshes/monkey.stl\"\n\n _visualData.ambient = [ 0.7, 0.5, 0.3 ]\n _visualData.diffuse = [ 0.7, 0.5, 0.3 ]\n _visualData.specular = [ 0.7, 0.5, 0.3 ]\n _visualData.shininess = 50.0;\n\n _bodyData = tysoc_bindings.PyBodyData()\n _bodyData.dyntype = tysoc_bindings.eDynamicsType.DYNAMIC\n _bodyData.addCollision( _collisionData )\n _bodyData.addVisual( _visualData )\n \n _position = 4.0 * ( np.random.rand( 3 ) - 0.5 )\n _position[2] = 3.0\n _rotation = np.pi * ( np.random.rand( 3 ) - 0.5 )\n\n _body = tysoc_bindings.PyBody( name,\n _bodyData,\n _position,\n _rotation )\n\n return _body\n\nif __name__ == '__main__' :\n _scenario = tysoc_bindings.PyScenario()\n\n ## _terrainGen = tysoc_bindings.PyStaticTerrainGen( 'terrainGen0' ) \n ## _terrainGen.createPrimitive( 'plane',\n ## [ 10.0, 10.0, 0.2 ],\n ## [ 0.0, 0.0, 0.0 ],\n ## [ 0.0, 0.0, 0.0 ],\n ## [ 0.2, 0.3, 0.4 ],\n ## 'built_in_chessboard' )\n ## _scenario.addTerrainGen( _terrainGen )\n\n _scenario.addBody( createFloor() )\n \n for i in range( NUM_BOXES ) :\n _scenario.addBody( createSingleBody( 'box_' + str( i ), 'box' ) )\n\n for i in range( NUM_SPHERES ) :\n _scenario.addBody( createSingleBody( 'sphere_' + str( i ), 'sphere' ) )\n\n for i in range( NUM_CYLINDERS ) :\n _scenario.addBody( createSingleBody( 'cylinder_' + str( i ), 'cylinder' ) )\n\n for i in range( NUM_CAPSULES ) :\n _scenario.addBody( createSingleBody( 'capsule_' + str( i ), 'capsule' ) )\n\n for i in range( NUM_MESHES ) :\n _scenario.addBody( createSingleBody( 'mesh_' + str( i ), 'mesh' ) )\n \n _runtime = pytysoc.createRuntime( physicsBackend = pytysoc.BACKENDS.PHYSICS.BULLET,\n renderingBackend = pytysoc.BACKENDS.RENDERING.GLVIZ )\n\n _simulation = _runtime.createSimulation( _scenario )\n _visualizer = _runtime.createVisualizer( _scenario )\n \n _simulation.initialize()\n _visualizer.initialize()\n \n _simulation.step()\n _visualizer.render()\n\n _running = False\n \n while _visualizer.isActive() :\n \n if _visualizer.checkSingleKeyPress( tysoc_bindings.KEY_P ) :\n _running = not _running\n elif _visualizer.checkSingleKeyPress( tysoc_bindings.KEY_R ) 
:\n _simulation.reset()\n elif _visualizer.checkSingleKeyPress( tysoc_bindings.KEY_ESCAPE ) :\n break\n\n start = time.time()\n if _running :\n _simulation.step()\n \n _visualizer.render()\n\n duration = time.time() - start\n print( \"step-time: {} ||| fps: {}\".format( duration, 1.0 / duration ) )","repo_name":"wpumacay/loco-bullet","sub_path":"_legacy/examples/python/primitives/example_primitives.py","file_name":"example_primitives.py","file_ext":"py","file_size_in_byte":5365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20130491745","text":"from pymongo import MongoClient, ASCENDING, DESCENDING\nfrom bson.json_util import dumps\nfrom bson.objectid import ObjectId\nimport json\nimport datetime\nfrom constants.dbpath import db_path\n\nclass DBProject:\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__client = MongoClient(db_path)\n self.__db = self.__client.circuit\n\n def countDocumentsById(self, projectId: str) -> int:\n return self.__db.projects.count_documents({\n \"_id\": ObjectId(projectId),\n \"isActive\": True\n })\n\n def getAllProjectIds(self) -> dict:\n result = self.__db.projects.find({\n \"isActive\": True\n }, {\n \"_id\": 1\n })\n return json.loads(dumps(result))\n\n def getPublicProjectIds(self) -> dict:\n result = self.__db.projects.find({\n \"isActive\": True,\n \"visibility\": \"public\"\n }, {\n \"_id\": 1\n })\n return json.loads(dumps(result))\n\n def getProjectById(self, proejctId: str) -> dict:\n result = self.__db.projects.find_one({\n \"_id\": ObjectId(proejctId),\n \"isActive\": True\n }, {\n \"_id\": 1,\n \"index\": 1,\n \"title\": 1,\n \"description\": 1,\n \"milestonesList\": 1,\n \"visibility\": 1,\n \"members\": 1,\n \"projectMetaId\": 1,\n \"fields\": 1,\n \"meta\": 1\n })\n return json.loads(dumps(result))\n\n def getProjectsByIds(self, projectIds: \"list of str\") -> dict:\n projectIds = list(map(ObjectId, projectIds))\n result = self.__db.projects.find({\n \"_id\": {\n \"$in\": projectIds\n },\n \"isActive\": True\n }, {\n \"_id\": 1,\n \"index\": 1,\n \"title\": 1,\n \"description\": 1,\n \"milestonesList\": 1,\n \"visibility\": 1,\n \"members\": 1,\n \"projectMetaId\": 1,\n \"fields\": 1,\n \"meta\": 1\n })\n return json.loads(dumps(result))\n\n def getActiveProjectIdsByIds(self, projectIds: \"list of str\") -> \"list of dict\":\n projectIds = list(map(ObjectId, projectIds))\n result = self.__db.projects.find({\n \"_id\": {\n \"$in\": projectIds\n },\n \"isActive\": True\n }, {\n \"_id\": 1\n })\n return json.loads(dumps(result))\n\n def getFieldsById(self, projectId: str) -> \"list of dict\":\n return self.__db.metaProjects.find_one({\n \"_id\": ObjectId(projectId),\n \"isActive\": True\n }, {\n \"_id\": 0,\n \"fields\": 1\n })[\"fields\"]\n\n def getRoleIdOfUserInProject(self, projectId: str, userId: str) -> str:\n result = self.__db.projects.find_one({\n \"_id\": ObjectId(projectId),\n \"isActive\": True,\n \"members.userId\": ObjectId(userId)\n }, {\n \"_id\": 0,\n \"members.$\": 1\n })\n return str(result[\"members\"][0][\"roleId\"]) if result else None\n\n def getAllMilestoneIds(self, projectId: str) -> \"list of dict\":\n result = self.__db.projects.find_one({\n \"_id\": ObjectId(projectId),\n \"isActive\": True\n }, {\n \"_id\": 0,\n \"milestonesList\": 1\n })\n return json.loads(dumps(result))[\"milestonesList\"]\n\n def getAllMetaProjects(self) -> dict:\n result = self.__db.metaProjects.find({}, {\n \"_id\": 1,\n \"index\": 1,\n \"title\": 
1,\n            \"description\": 1,\n            \"fields\": 1,\n            \"meta\": 1\n        })\n        return json.loads(dumps(result))\n\n    def isProjectActive(self, projectId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True\n        }) == 1\n\n    def isPubliclyVisible(self, projectId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True,\n            \"visibility\": \"public\"\n        }) == 1\n\n    def isInternallyVisible(self, projectId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True,\n            \"visibility\": \"internal\"\n        }) == 1\n\n    def isPrivatelyVisible(self, projectId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True,\n            \"visibility\": \"private\"\n        }) == 1\n\n    def isInternalAndHasThisMember(self, projectId: str, userId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True,\n            \"visibility\": \"internal\",\n            \"members.userId\": ObjectId(userId)\n        }) == 1\n\n    def isPrivateAndHasThisMember(self, projectId: str, userId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True,\n            \"visibility\": \"private\",\n            \"members.userId\": ObjectId(userId)\n        }) == 1\n\n    def hasThisMember(self, projectId: str, userId: str) -> bool:\n        return self.__db.projects.count_documents({\n            \"_id\": ObjectId(projectId),\n            \"isActive\": True,\n            \"members.userId\": ObjectId(userId)\n        }) == 1\n\n    def insertMetaProject(self, metaProject: dict) -> str:\n        _id = self.__db.metaProjects.insert_one(metaProject).inserted_id\n        return str(_id)\n\n    def insertProject(self, project: dict) -> str:\n        _id = self.__db.projects.insert_one(project).inserted_id\n        return str(_id)\n\n    def insertMilestoneIdToProject(self, projectId: str, milestoneId: str) -> bool:\n        return self.__db.projects.update_one({\n            \"_id\": ObjectId(projectId)\n        }, {\n            \"$push\": {\n                \"milestonesList\": ObjectId(milestoneId)\n            }\n        }).modified_count == 1\n\n    def updateProject(self, projectId: str, project: dict) -> bool:\n        return self.__db.projects.update_one({\n            \"_id\": ObjectId(projectId)\n        }, {\n            \"$set\": {\n                \"title\": project[\"title\"],\n                \"description\": project[\"description\"],\n                \"visibility\": project[\"visibility\"],\n                \"projectMetaId\": project[\"projectMetaId\"],\n                \"fields\": project[\"fields\"],\n                \"members\": project[\"members\"],\n                \"meta.lastUpdatedBy\": project[\"meta\"][\"lastUpdatedBy\"],\n                \"meta.lastUpdatedOn\": project[\"meta\"][\"lastUpdatedOn\"]\n            }\n        }).modified_count == 1\n\n    def removeMemberFromProject(self, userId: str, projectId: str) -> bool:\n        # members holds subdocuments with a userId field, so $pull matches on that field\n        return self.__db.projects.update_one({\n            \"_id\": ObjectId(projectId)\n        }, {\n            \"$pull\": {\n                \"members\": {\n                    \"userId\": ObjectId(userId)\n                }\n            }\n        }).modified_count == 1\n","repo_name":"kartikeybhardwaj/circuit-bapi","sub_path":"database/project/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12473013917","text":"\"\"\"\r\nWrite a program that reads 10 numbers from input and at the end prints the number that has\r\nthe most prime divisors, together with the count of its prime divisors.\r\nIf several numbers share this property, print the largest of them.\r\n\"\"\"\r\n#chatgpt help\r\ndef prime_factors(n):\r\n    \"\"\"Return a set of the prime factors of a positive integer.\"\"\"\r\n    factors = set()\r\n    d = 2\r\n    while n > 1:\r\n        while n % d == 0:\r\n            factors.add(d)\r\n            n //= d\r\n        d += 1\r\n        if d*d > n:\r\n            if n > 1:\r\n                factors.add(n)\r\n            break\r\n    return factors\r\n\r\nnumbers = []\r\nfor i in range(10):\r\n    num = int(input(\"\"))\r\n    numbers.append(num)\r\n\r\nmax_count = 0\r\nmax_prime = None\r\n\r\nfor num in numbers:\r\n    factors = prime_factors(num)\r\n    count = len(factors)\r\n    if count > max_count:\r\n        max_count = count\r\n        max_prime = num\r\n    elif count == max_count and (max_prime is None or num > max_prime):\r\n        max_prime = num\r\n\r\nprint(max_prime, max_count)","repo_name":"NobodyProgramer04/Mehrab_Danandeh","sub_path":"maghsoom_alayh_aval.py","file_name":"maghsoom_alayh_aval.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72657141814","text":"#data.py\r\n#William Sexton\r\n#2/16/17\r\nimport csv\r\nimport numpy as np\r\nfrom scipy import sparse\r\nimport collections\r\nimport itertools\r\n\r\n\r\nclass Data:\r\n    \"\"\"Process ACS data for MWEM algorithm. Builds histogram from input file.\"\"\"\r\n    def __init__(self,fname):\r\n        with open(fname,'r') as f:\r\n            data_iter = csv.reader(f, \r\n                                   delimiter = ',', \r\n                                   quotechar = '\"')\r\n            next(data_iter) #Skips file header.\r\n            d = collections.defaultdict(int)\r\n            \r\n            for a in range(797): #Number of income bins in ACS population-level datafile.\r\n                d[a]=0 #Initialize bin counts.\r\n            \r\n            for data in data_iter:\r\n                d[int(data[0])] +=1 #increment count of income bin associated with data record.\r\n            \r\n        self.db=d #database\r\n        self.hist=np.asarray(list(d.values()),dtype='int64') #histogram of binned income.\r\n        self.n=np.sum(self.hist) #number of records\r\n        self.dimChi=len(self.hist) #size of universe\r\n        \r\n        \r\n","repo_name":"labordynamicsinstitute/economics-of-privacy-replication","sub_path":"programs/analysis/03_ACS_Income_MWEM/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17074694697","text":"from __future__ import unicode_literals\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Quiz(models.Model):\n\n    class Meta:\n        verbose_name = \"Quiz\"\n        verbose_name_plural = \"Quizzes\"\n\n    def __str__(self):\n        return self.title\n\n    title = models.CharField(max_length=50, default=\"\")\n    content = models.TextField()\n\n\nclass Question(models.Model):\n\n    class Meta:\n        verbose_name = \"True/false question\"\n        verbose_name_plural = \"True/false questions\"\n\n    def __str__(self):\n        return self.statement\n\n    def next(self):\n        \"\"\"The next question in the quiz, or None if this is the last\"\"\"\n        # Note: this is expensive (linked list would be better), but quizzes are short\n        questions = list(self.quiz.question_set.all())\n        if not (self == questions[-1]):\n            current_pos = questions.index(self)\n            return questions[current_pos + 1]\n\n    quiz = models.ForeignKey(Quiz)\n    statement = models.CharField(max_length=200)\n    answer = models.BooleanField(\"True?\")\n\n\nclass UserAnswer(models.Model):\n\n    class Meta:\n        verbose_name = \"User answer\"\n        verbose_name_plural = \"User answers\"\n\n    def __str__(self):\n        return \"{}: {}: {}\".format(self.user, self.question, self.answer)\n\n    user = 
models.ForeignKey(User)\n question = models.ForeignKey(Question)\n answer = models.BooleanField()\n\n\n","repo_name":"pbx/django-quiz","sub_path":"quiz/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21944302077","text":"\"\"\"\n@author: Galileo garibaldi\n@date: 20/02/2021\n@description: Ejemplo 3\n\"\"\"\n###Definir la función\ndef funcion3(a,b, c):###Recibimos parametros\n d = a*b*c ###Definimos una variable para almacenar el reesultado\n ###En este caso no es necesario poner una impresion (print)\n return d\n \nresultado = funcion3(1,2,3)\n##imprime el resultado, pero no retorna nada\nprint(resultado)","repo_name":"galigaribaldi/Python-Turtle","sub_path":"Modulo 2/Ejemplos/ejemplo-03.py","file_name":"ejemplo-03.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28008221214","text":"from util.instrumentation import time_instrument\nfrom random import randrange\nimport datetime as dt\n\ndef record_wager(self, lucky_number, wager, outcome, chosen_number, win_multiplier, cheated_death):\n wager_time = dt.datetime.now().strftime(\"%s\")\n\n self.execute(\"INSERT INTO wager_history (lucky_number, wager, outcome, wager_time, chosen_number, win_multiplier, cheated_death) VALUES (?, ?, ?, ?, ?, ?, ?)\", [lucky_number, wager, outcome, wager_time, chosen_number, win_multiplier, cheated_death])\n\ndef add_comment(self, image, comment):\n\n self.execute(\"insert into image_comments (file_name, comment) values (?, ?)\", [image, comment])\n\ndef get_image_comments(self, image):\n\n return self.execute(\"select comment from image_comments where file_name=?\", [image])\n\ndef get_nsfw_images(self):\n\n return self.execute(\"select * from images where nsfw=1\")\n\ndef get_top_images(self, num_images=20, order=\"desc\"):\n\n if order == \"desc\":\n return self.execute(\"select * from images order by votes desc limit ?\", [num_images])\n else:\n return self.execute(\"select * from images order by votes asc limit ?\", [num_images])\n\ndef get_image_votes(self, image):\n\n votes = 0\n\n try:\n cursor = self.execute(\"select votes from images where file_name=?\", [image])\n result = cursor.next()\n votes = int(result[\"votes\"])\n except StopIteration:\n self.execute(\"insert into images (file_name) values (?)\", [image])\n votes = 0\n except:\n votes = 0\n\n return votes\n\ndef get_random_comment(self):\n #if this gets slow, it's because it selects the whole table first\n cursor = self.execute(\"SELECT distinct comment FROM image_comments ORDER BY Random() LIMIT 1\")\n\n result = cursor.next()\n\n return result[\"comment\"]\n\ndef get_random_utterance(self, seed=None):\n #if this gets slow, it's because it selects the whole table first\n\n if seed:\n sql_seed = '%' + seed + '%'\n cursor = self.execute(\"SELECT speech_text FROM snippets where speech_text like ? 
ORDER BY Random() LIMIT 1\", [sql_seed])\n else:\n cursor = self.execute(\"SELECT speech_text FROM snippets ORDER BY Random() LIMIT 1\")\n\n try:\n result = cursor.next()\n except StopIteration:\n if seed:\n return \"It seems as though nobody has mentioned %s\" % seed\n\n return result[\"speech_text\"]\n\ndef check_nsfw(self, image):\n\n nsfw = 0\n try:\n cursor = self.execute(\"select nsfw from images where file_name=?\", [image])\n result = cursor.next()\n nsfw = int(result[\"nsfw\"])\n except:\n pass\n\n return nsfw == 1\n\ndef add_image(self, file_name):\n image = None\n\n try:\n image = self.execute(\"select file_name from images where file_name=?\", [file_name]).next()\n except StopIteration:\n image = None\n \n if not image:\n self.execute(\"INSERT INTO images (file_name, votes, nsfw) values (?, 0, 0)\", [file_name])\n\ndef check_appropriate(self, image):\n appropriate = True\n try:\n cursor = self.execute(\"select nsfw, votes from images where file_name=?\", [image])\n result = cursor.next()\n if result[\"nsfw\"]:\n nsfw = int(result[\"nsfw\"])\n else:\n nsfw = 0\n\n if result[\"votes\"]:\n votes = int(result[\"votes\"])\n else:\n votes = 0\n\n if nsfw == 1 or votes < -3:\n appropriate = False\n\n except TypeError:\n appropriate = True\n except StopIteration:\n appropriate = True\n\n return appropriate\n\n@time_instrument\ndef get_random_image(self):\n\n min_votes = self.execute(\"SELECT min(votes) as min_votes FROM images where votes >= 0 and nsfw <> 1\").next()[\"min_votes\"]\n\n image_count = self.execute(\"SELECT count(*) as image_count FROM images where votes = ? and nsfw <> 1\", [min_votes]).next()[\"image_count\"]\n\n if image_count == 0:\n return \"\"\n\n image_limit = randrange(0, image_count)\n\n return self.execute(\"SELECT file_name FROM images where votes between -5 and ? 
and nsfw <> 1 LIMIT ?, 1\", [min_votes, image_limit]).next()[\"file_name\"]\n","repo_name":"joshuag/speakerbot","sub_path":"speakerbot/speaker_db_plugins/speaker_images_comments.py","file_name":"speaker_images_comments.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"21592824075","text":"##Junjie Lin 25792830 Project 5\n\n## This module contains a RadioAPP class, which raise by the main game.\n## Then pop out a window for user to set up for board.\n\n\nimport tkinter\nclass RadioAPP:\n def __init__(self):\n self._root_window = tkinter.Toplevel()\n\n subject = tkinter.Label(self._root_window, text = 'Set Up',\n font = ('Helvetica', 15))\n subject.grid(row = 0, column = 0)\n\n column_x = tkinter.Label(self._root_window, text = 'What size of column do you like?',\n font = ('Helvetica', 13))\n column_x.grid(row = 1, column = 0, sticky = tkinter.W)\n \n self.COLUMN= tkinter.IntVar()\n b1= tkinter.Radiobutton (self._root_window, text='4', value =4, variable = self.COLUMN)\n b1.grid(row = 1, column = 1, sticky = tkinter.W)\n b2= tkinter.Radiobutton (self._root_window, text='6', value =6, variable = self.COLUMN)\n b2.grid(row = 1, column = 2, sticky = tkinter.W)\n b3= tkinter.Radiobutton (self._root_window, text='8', value =8, variable = self.COLUMN)\n b3.grid(row = 1, column = 3, sticky = tkinter.W)\n b4= tkinter.Radiobutton (self._root_window, text='10', value =10, variable = self.COLUMN)\n b4.grid(row = 1, column = 4, sticky = tkinter.W)\n b5= tkinter.Radiobutton (self._root_window, text='12', value =12, variable = self.COLUMN)\n b5.grid(row = 1, column = 5, sticky = tkinter.W)\n b6= tkinter.Radiobutton (self._root_window, text='14', value =14, variable = self.COLUMN)\n b6.grid(row = 1, column = 6, sticky = tkinter.W)\n b7= tkinter.Radiobutton (self._root_window, text='16', value =16, variable = self.COLUMN)\n b7.grid(row = 1, column = 7, sticky = tkinter.W)\n\n row_y = tkinter.Label(self._root_window, text = 'What size of row do you like?',\n font = ('Helvetica', 13))\n row_y.grid(row = 2, column = 0,sticky = tkinter.W)\n \n self.ROW= tkinter.IntVar()\n b8= tkinter.Radiobutton (self._root_window, text='4', value =4, variable = self.ROW)\n b8.grid(row = 2, column = 1, sticky = tkinter.W)\n b9= tkinter.Radiobutton (self._root_window, text='6', value =6, variable = self.ROW)\n b9.grid(row = 2, column = 2, sticky = tkinter.W)\n b10= tkinter.Radiobutton (self._root_window, text='8', value =8, variable = self.ROW)\n b10.grid(row = 2, column = 3, sticky = tkinter.W)\n b11= tkinter.Radiobutton (self._root_window, text='10', value =10, variable = self.ROW)\n b11.grid(row = 2, column = 4, sticky = tkinter.W)\n b12= tkinter.Radiobutton (self._root_window, text='12', value =12, variable = self.ROW)\n b12.grid(row = 2, column = 5, sticky = tkinter.W)\n b13= tkinter.Radiobutton (self._root_window, text='14', value =14, variable = self.ROW)\n b13.grid(row = 2, column = 6, sticky = tkinter.W)\n b14= tkinter.Radiobutton (self._root_window, text='16', value =16, variable = self.ROW)\n b14.grid(row = 2, column = 7, sticky = tkinter.W)\n\n who_goes_first = tkinter.Label(self._root_window, text = 'Who goes first?',\n font = ('Helvetica', 13))\n who_goes_first.grid(row = 3, column = 0, sticky = tkinter.W)\n \n self.first_move= tkinter.IntVar()\n b111= tkinter.Radiobutton (self._root_window, text='Black', value =1, variable = self.first_move)\n b111.grid(row = 4, column = 0, sticky = tkinter.W)\n b112= 
tkinter.Radiobutton (self._root_window, text='White', value =2, variable = self.first_move)\n b112.grid(row = 5, column = 0, sticky = tkinter.W)\n\n First4 = tkinter.Label(self._root_window, text = 'How to set the first 4 pieces?',\n font = ('Helvetica', 13))\n First4.grid(row = 6, column = 0,sticky = tkinter.W)\n\n self.top_set= tkinter.IntVar()\n b33= tkinter.Radiobutton (self._root_window, text='WB', value =1, variable = self.top_set)\n b33.grid(row = 7, column = 0, sticky = tkinter.W)\n b44= tkinter.Radiobutton (self._root_window, text='BW', value =2, variable = self.top_set)\n b44.grid(row = 8, column = 0, sticky = tkinter.W)\n\n How_to_win = tkinter.Label(self._root_window, text = 'How to win?',\n font = ('Helvetica', 13))\n How_to_win.grid(row = 9, column = 0,sticky = tkinter.W)\n\n self.set_win= tkinter.IntVar()\n b30= tkinter.Radiobutton (self._root_window, text='More', value =1, variable = self.set_win)\n b30.grid(row = 10, column = 0, sticky = tkinter.W)\n b40= tkinter.Radiobutton (self._root_window, text='Less', value =2, variable = self.set_win)\n b40.grid(row = 11, column = 0, sticky = tkinter.W)\n\n button_frame = tkinter.Frame(self._root_window)\n button_frame.grid(row = 12, column = 0, sticky = tkinter.E)\n\n ok_button = tkinter.Button(\n button_frame, text = 'OK', font = ('Helvetica', 20),\n command = self._on_ok)\n ok_button.grid(row = 0, column = 0,sticky = tkinter.E)\n\n cancel_button = tkinter.Button(\n button_frame, text = 'Cancel', font = ('Helvetica', 20),\n command = self._on_cancel)\n cancel_button.grid(row = 0, column = 1, sticky = tkinter.E)\n\n self._root_window.rowconfigure(0, weight = 10)\n self._root_window.rowconfigure(1, weight = 10)\n self._root_window.rowconfigure(2, weight = 10)\n self._root_window.rowconfigure(3, weight = 10)\n self._root_window.rowconfigure(4, weight = 10)\n self._root_window.rowconfigure(5, weight = 10)\n self._root_window.rowconfigure(6, weight = 10)\n self._root_window.rowconfigure(7, weight = 10)\n self._root_window.rowconfigure(8, weight = 10)\n self._root_window.rowconfigure(9, weight = 10)\n self._root_window.rowconfigure(10, weight = 10)\n self._root_window.rowconfigure(11, weight = 10)\n self._root_window.rowconfigure(12, weight = 10)\n self._root_window.columnconfigure(0, weight = 10)\n self._root_window.columnconfigure(1, weight = 10)\n self._root_window.columnconfigure(2, weight = 10)\n self._root_window.columnconfigure(3, weight = 10)\n self._root_window.columnconfigure(4, weight = 10)\n self._root_window.columnconfigure(5, weight = 10)\n self._root_window.columnconfigure(6, weight = 10)\n self._root_window.columnconfigure(7, weight = 10)\n\n button_frame.rowconfigure(0, weight = 10)\n button_frame.columnconfigure(0, weight = 10)\n button_frame.columnconfigure(1, weight = 10)\n\n self._ok_clicked = False\n self._cancel_clicked = False\n self._column = 0\n self._row = 0\n self._first_move = ''\n self._top_setting = ''\n self._how_to_win = ''\n \n def show(self):\n self._root_window.grab_set()\n self._root_window.wait_window()\n def ok_clicked(self):\n return self._ok_clicked\n def cancel_clicked(self):\n return self._cancel_clicked\n def column_width(self):\n return self._column\n def row_height(self):\n return self._row\n def First_Move(self):\n return self._first_move\n def TopSetting(self):\n return self._top_setting\n def HowToWIN(self):\n return self._how_to_win\n \n def _on_ok(self):\n self._ok_clicked = True\n self._column = self.COLUMN.get()\n self._row = self.ROW.get()\n self._first_move = self.first_move.get()\n 
self._top_setting = self.top_set.get()\n self._how_to_win = self.set_win.get()\n self._root_window.destroy()\n def _on_cancel(self):\n self._cancel_clicked = True\n self._root_window.destroy()\nif __name__ == '__main__':\n RadioAPP().show()\n\n\ndef check_input(information):\n ##Check all of the inputs \n ##If user did not click one or more of them,return False\n ##If so, return all of the inputs\n if information[0] in [4,6,8,10,12,14,16]:\n information[0] = information[0]\n\n if information[1] in [4,6,8,10,12,14,16]:\n information[1] = information[1]\n\n if information[2] == 1:\n information[2] = ('black')\n elif information[2] == 2:\n information[2] = ('white')\n\n if information[3] == 1:\n information[3] = 'wb'\n elif information[3] == 2:\n information[3] = 'bw'\n\n if information[4] == 1:\n information[4] = 'more'\n elif information[4] == 2:\n information[4] = 'less'\n\n if information[0]==0 or information[1]==0 or information[2] == 0 or information[3] ==0 or information[4] == 0:\n return False\n else:\n return (information)\n\n##check_input()\n\n\n\n","repo_name":"409230250/Programming-with-Software-Libraries-on-Python","sub_path":"Project 5/inputs.py","file_name":"inputs.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"32612882433","text":"def insertionSort(ar):\n for i in range(len(ar)):\n check=i\n while check > 0 and ar[check-1] > ar[i]:\n temp=ar[i]\n ar[i]=ar[check-1]\n ar[check-1]=temp\n i=check-1\n check-=1\n\nif __name__ == '__main__':\n arr=input('Enter numbers to sort separated by space: ').split(' ')\n arr=list(map(lambda x:int(x),arr))\n insertionSort(arr)\n print(arr)","repo_name":"JiteshSindhare/Algorithms","sub_path":"Sorting/InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33741076502","text":"\"\"\"Removes the host_id from the Location model\n\nRevision ID: 94f88d0bf9bb\nRevises: ab25240c9bf9\nCreate Date: 2017-05-20 16:37:48.962292\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '94f88d0bf9bb'\ndown_revision = 'ab25240c9bf9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('location_host_id_fkey', 'location', type_='foreignkey')\n op.drop_column('location', 'host_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('location', sa.Column('host_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('location_host_id_fkey', 'location', 'users', ['host_id'], ['id'])\n # ### end Alembic commands ###\n","repo_name":"Rdbaker/Mealbound","sub_path":"migrations/versions/94f88d0bf9bb_.py","file_name":"94f88d0bf9bb_.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8175744209","text":"import networkx as nx\nimport taxidata as td\nimport numpy as np\nfrom ..object import taxiarray, trajectory, Dataset\nfrom .ksegment import *\nfrom tqdm import tqdm\n\nclass PathContainer:\n \"\"\"class for mapmatching. 
It saves the path and its cost.\"\"\"\n\n    def __init__(self, segment_id, cost):\n        self.segments = [segment_id]\n        self.cost = cost\n        self.index = 1\n\n    def __lt__(self, other):\n        '''sorting'''\n        return self.cost<other.cost\n\n    def __gt__(self, other):\n        '''sorting'''\n        return self.cost>other.cost\n\n    def __le__(self, other):\n        '''sorting'''\n        return self.cost<=other.cost\n\n    def __ge__(self, other):\n        '''sorting'''\n        return self.cost>=other.cost\n\n    def __del__(self):\n        del self.cost, self.index\n\n    def optimize(self, path, delta_cost):\n        assert delta_cost<0, \"optimize must reduce cost.\"\n        self.segments[:path.index] = path.segments\n        self.cost += delta_cost\n\n    def copy(self):\n        temp = self.__class__(0, self.cost)\n        temp.segments = self.segments.copy()\n        temp.index = self.index\n        return temp\n\n    def update(self, segment, cost):\n        self.segments.append(segment.id)\n        self.cost += cost\n        self.index += 1\n\n\n\n\nclass SingleTrackMapMatching:\n    \"\"\"object for single-track map matching.\"\"\"\n    _default_segment_func = k_segments_strict_bfs_with_length\n\n    def __init__(self, trajectory, road_network):\n        #argument\n        self.map = road_network\n        self.target = trajectory\n\n        #segment generation\n        self.segment_set = [] # list of segments\n        self.node_segments = {} # node to segment dictionary\n        self.segments_index = 0\n        self.candidate = []\n\n        #dynamic programming (save results of each calculation to reuse)\n        self.distance_map = [] # this will be a double dictionary (i.e. distance_map[index][(index_of_seg)])\n        self.stitching_map = {} # this will be a double dictionary (i.e. stitching_map[(index_of_seg1)][(index_of_seg2)])\n\n        # the matching algorithm will be implemented in a bfs manner.\n        self.length = len(self.target)\n\n    def generate_ksegment(self, k = 800, seg_func = None):\n        \"\"\"Segment generating function. return dictionary of segments by node.\n\n        Parameters\n        ----------\n        seg_func : `func`\n            segment generation function. function must be defined as the form of `function(Graph, node, k)`\n        k : `int` or `float`\n            a length threshold of ksegment. default is 800(m).\n\n        Returns\n        -------\n        `dict`\n            a dictionary of node to segment whose key is the start node. return will be saved on `self.segments`\n\n        ########## Please add a description of ksegment here. ##########\n\n        #################################################################\n\n        Each segment is given its own integer index for convenient calculation;\n        we will save each matching cost based on the index of segments.\n        ***Every generated segment must be in a `self.stitching_map` as a key with an empty dictionary as a value.\n        \"\"\"\n        if seg_func is None:\n            gen = SingleTrackMapMatching._default_segment_func\n        else: gen = seg_func\n\n        for node in self.map.nodes:\n            segment_at_node = gen(self.map, node, k)\n            for i in segment_at_node:\n                self.segment_set.append(i)\n                i.id = self.segments_index\n                self.segments_index+=1\n\n    def segment_take(self, trajectory):\n        \"\"\"Segment function. Take some segment close to trajectory.\n\n        Parameters\n        ----------\n        trajectory : `taxidata.trajectory`\n            a sequence of converted GPS data(UTM coordinate). 
It must have `pos` attribute.\n\n        Returns\n        -------\n        `list`\n            Two lists of segment id & position close to trajectory.\n\n        \"\"\"\n        grid_set = td.taxiarray.grid_set(trajectory)\n        start_set = td.start(grid_set)\n\n        seg_id=[]\n        seg_xy=[]\n\n        for z in start_set:\n            for j in z:\n                seg_id.append(td.k_segments_strict_bfs_with_length(td.Roadnetwork(), j, 800))\n                for i in td.k_xy(j):\n                    seg_xy.append(i)\n\n        seg_id=sum(seg_id,[])\n        real_xy=[]\n        real_id=[]\n\n        for i in range(len(seg_xy)):\n            if np.isin(td.taxiarray.trajectory_grid(seg_xy[i]), grid_set).all():\n                real_xy.append(seg_xy[i])\n                real_id.append(seg_id[i])\n\n        return real_xy, real_id\n\n\n    def make_candidate_set(self, trajectory, ksegment_set, real_id, d_max = 200):\n        \"\"\"Find a candidate segment set with stored ksegments through calculating the distance of curve.\n\n        Parameters\n        ----------\n        trajectory : `taxidata.trajectory`\n            a sequence of converted GPS data(UTM coordinate). It must have `pos` attribute.\n        ksegment_set : `list`\n            a list of ksegments.\n        d_max : `int` or `float`\n            a threshold of maximum distance between positions of trajectory and segments. the default value is 200 (m).\n\n        Returns\n        -------\n        `list`\n            a list with the same cardinality as trajectory. Each component of the list is a list of candidate segments(or index (not fixed.)).\n\n        Please add a description of variable parametrization here.\n\n        + each calculation must be saved on `self.distance_map`. (the saving part is not implemented yet)\n        \"\"\"\n\n        candidate_set=[[] for i in range(len(trajectory))]\n        for i in range(len(trajectory)):\n            for j in range(len(ksegment_set)):\n                if (td.trajectory.trajectory_grid(real_xy[j][0],point=True)==td.trajectory.grid_set(trajectory[i],point=True)).any():\n                    if td.trajectory.distance_of_curve(self, i, ksegment_set[j])<=d_max:\n                        candidate_set[i].append(real_id[j])\n\n        return candidate_set\n\n\n    def path_optimizing(self, dis_weight=1, stitch_weight=10):\n        \"\"\"Find an optimized path through minimizing\n        the sum of distance and stitching score.\n\n        Parameters\n        ----------\n        dis_weight : `float`\n            a cost weight of distance.\n        stitch_weight : `float`\n            a cost weight of stitching score.\n\n        Returns\n        -------\n        (`path`, `float`)\n            a list of index of selected segments, and its cost.\n\n        Map matching is a pretty heavy calculation because\n        the number of candidate combinations grows exponentially\n        with the size of the sequence.\n        So, we need to reduce the calculation time through dynamic programming\n        or the so-called **Dijkstra algorithm**, the algorithm for finding the shortest path.\n        By treating the cost as a heuristic (c.f. 
length in finding the shortest path),\n        we can optimize the problem the same way as minimizing distance.\n        It reduces time complexity from `O(n^l)` to `O(l*n^2)` where\n        `n` denotes the mean cardinality of each candidate set, and\n        `l` denotes the length of the sequence.\n        \"\"\"\n        #weights\n        alpha = dis_weight\n        beta = stitch_weight\n\n        length = len(self.target)\n\n        #cost and path\n        cost = [np.zeros([layer[1].shape[0]],dtype = np.float64) for layer in self.distance_map]\n        path = [{} for i in range(length-1)]\n\n        #initialize with the distance cost of the first layer\n        cost[0] += alpha * self.distance_map[0][1]\n\n        for i in tqdm(range(length - 1)):\n            order = i+1\n            start_node_num = self.distance_map[i][1].shape[0]\n            end_node_num = self.distance_map[i+1][1].shape[0]\n\n            #(n)->(m) costs minimization\n            costs = np.zeros([start_node_num, end_node_num], dtype = np.float64)\n            # accumulated minimum cost of each start segment, added row-wise\n            costs += cost[i][:, np.newaxis]\n            # distance cost of each end segment, added column-wise\n            costs += alpha * self.distance_map[i+1][1]\n            for j,start_seg in enumerate(self.distance_map[i][0]):\n                for k,end_seg in enumerate(self.distance_map[i+1][0]):\n                    self.stitching_map.setdefault(j, {})\n                    self.stitching_map[j][k] = self.stitching_map[j].get(k, self.segment_set[j].stitch_score(self.segment_set[k]))\n                    costs[j][k] += beta * self.stitching_map[j][k]\n            # for each end segment, keep the cheapest start segment\n            minimum = costs.argmin(axis = 0)\n            cost[order] = costs[minimum, np.arange(end_node_num)]\n            for k,end_seg in enumerate(self.distance_map[i+1][0]):\n                path[i][end_seg] = self.distance_map[i][0][minimum[k]]\n\n        #results\n        min_index = cost[-1].argmin()\n        cost_min = cost[-1][min_index]\n        selected_path = [self.distance_map[-1][0][min_index]]\n        for i in range(length-1):\n            # path[t] maps a layer t+1 segment back to its best predecessor in layer t\n            inv = length - 2 - i\n            selected_path.append(path[inv][selected_path[-1]])\n        selected_path.reverse()\n        return selected_path, cost_min\n\n    def path_stitching(self, segments):\n        \"\"\"Make a whole path with given segments.\n\n        Parameters\n        ----------\n        segments : `list`\n            a list of index of segment which will be stitched.\n\n        Returns\n        -------\n        `taxidata.Segment`\n            a whole path which is stitched by given segments.\n\n        Combine segments into one giant segment similar to the given trajectory. If segments\n        don't overlap, insert the shortest path between the two separated segments, 
using\n        networkx.shortest_path to find it.\n        \"\"\"\n        Joint_node = segments[0].nodes()\n        for seg in segments[1:]:\n            start_overlap = np.where(Joint_node == seg.nodes()[0])[0]\n            if len(start_overlap)>0:\n                Joint_node = np.r_[Joint_node[:start_overlap[0]], seg.nodes()]\n            else:\n                shortest_path = nx.shortest_path(self.map, Joint_node[-1], seg.nodes()[0],'length')\n                shortest_path_array = np.zeros([len(shortest_path)])\n                for i in range(len(shortest_path)): shortest_path_array[i]=shortest_path[i]\n                Joint_node = np.r_[Joint_node, shortest_path_array[1:], seg.nodes()[1:]]\n        edge_in = (Joint_node[0],Joint_node[1],0,self.map.get_edge_data(Joint_node[0],Joint_node[1],0))\n        Jointsegment = Segment(edge_in)\n        for edge_count in range(len(Joint_node)-2):\n            edge_in = (Joint_node[edge_count+1],Joint_node[edge_count+2],0,self.map.get_edge_data(Joint_node[edge_count+1],Joint_node[edge_count+2],0))\n            Jointsegment = Jointsegment.expand(edge_in)\n        return Jointsegment\n\n    def segment_to_line(self, segment):\n        \"\"\"Convert a segment to its nodes' positions.\n\n        Parameter\n        ----------\n        segment : road segment\n\n        Return\n        -------\n        pos_array : np.array([x1,y1],[x2,y2],...)\n            The array of the segment's node positions.\n\n        Each node position of the segment is used to measure distances between\n        road segments and taxi trajectories\n        \"\"\"\n        pos_list = np.zeros([len(segment.nodes()),2])\n        for c in range(len(segment.nodes())):\n            pos_list[c] = self.map.nodes[segment.nodes()[c]]['pos']\n        return pos_list\n\n    def point_projection(self, path):\n        \"\"\"Find edges that points of `self.target` belong in.\n\n        Parameters\n        ----------\n        path : `taxidata.Segment`\n            A complete path which will be a map with projection.\n\n        Returns\n        -------\n        `list`\n            a list of tuples indicating the edges of the road network.\n\n        Assign each point of the trajectory to an edge of the road segment. Taxi GPS data includes\n        some noise, so it is hard to find the correct road the taxi is driving on. 
So we measure\n        the distance between each road (edge) and each taxi GPS point and assign the point to the closest edge.\n        \"\"\"\n        edge_list = []\n        path_line = self.segment_to_line(path)\n        for target_point in self.target:\n            distance_list = distance_line_point_new(path_line, target_point)\n            edge_list.append(path.edges()[np.where(min(distance_list)==distance_list)[0][0]])\n        return edge_list\n\n\n\nclass MultitrackMapMatching(SingleTrackMapMatching):\n    \"\"\"object for multi-track mapmatching.\"\"\"\n\n    def __init__(self, trajectory_list, road_network):\n        super(MultitrackMapMatching, self).__init__(trajectory_list,road_network)\n\n\n    def matching(self):\n        pass\n","repo_name":"JungHoonJung/complecity","sub_path":"taxidata/core/network/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":12651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18744595498","text":"import json\ndef main():\n    with open('../raw') as f:\n        raw=f.read().splitlines()\n    with open('../document.json') as f:\n        data=json.load(f)\n    for i in data.values():\n        for plug in i:\n            name,*_=plug\n            raw.remove(name)\n    print('\\n'.join(raw))\nif __name__=='__main__':\n    main()\n","repo_name":"altermo/vim-plugin-list","sub_path":"other/non-documented.py","file_name":"non-documented.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"21"} +{"seq_id":"38599568976","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport netCDF4 as nc\nimport subprocess as sys\n\noutput = sys.Popen([\"ls /work4/L.r02229011/WRF_3_5_1/WRFV3/run/control_1986_36hr/wrfout_d02_1986-02-* |head -n 1\"],shell=True, stdout=sys.PIPE).communicate()[0]\nfilenames=output.decode().split('\\n')\nfilenames.pop()\nFil=nc.MFDataset(filenames)\nprint(\"===\")\nprint(filenames)\nprint(\"Total files: \"+str(len(filenames)))\n############\noutput = sys.Popen([\"ls /work4/L.r02229011/WRF_3_5_1/WRFV3/run/irr_1986_36hr_both/wrfout_d02_1986-02-* |head -n 1\"],shell=True, stdout=sys.PIPE).communicate()[0]\nfilenames=output.decode().split('\\n')\nfilenames.pop()\nFilExp=nc.MFDataset(filenames)\nprint(filenames)\nprint(\"Total files: \"+str(len(filenames)))\n############\ntmpString=[]\nVar=[]\nTimeString=[]\nprint(\"Time len: \"+str(len(Fil.variables[\"Times\"])))\nVar=[] \n\"\"\"\n[31 43 53 99] [164 140 141 88] Var > 300\n[31 35 43 44 53 85 89 98 99] [164 163 140 140 141 96 94 83 88] Var > 250\n[185,95] is Taoyuan in Wen-Hao's experiment\n\"\"\"\nxgrid=31\nygrid=164\nprint(\"===\\n(\"+str(xgrid)+\", \"+str(ygrid)+\")\")\nprint(\"lat: \"+str(Fil.variables[\"XLAT\"][0,xgrid,ygrid])+\", lon: 
\"+str(Fil.variables[\"XLONG\"][0,xgrid,ygrid]))\ndSM1=FilExp.variables['SMOIS'][0,0,xgrid,ygrid]-Fil.variables['SMOIS'][0,0,xgrid,ygrid]\ndSM2=FilExp.variables['SMOIS'][0,1,xgrid,ygrid]-Fil.variables['SMOIS'][0,1,xgrid,ygrid]\ndSM3=FilExp.variables['SMOIS'][0,2,xgrid,ygrid]-Fil.variables['SMOIS'][0,2,xgrid,ygrid]\ndSM4=FilExp.variables['SMOIS'][0,3,xgrid,ygrid]-Fil.variables['SMOIS'][0,3,xgrid,ygrid]\ndSM1e=FilExp.variables['SMOIS'][36,0,xgrid,ygrid]-Fil.variables['SMOIS'][36,0,xgrid,ygrid]\ndSM2e=FilExp.variables['SMOIS'][36,1,xgrid,ygrid]-Fil.variables['SMOIS'][36,1,xgrid,ygrid]\ndSM3e=FilExp.variables['SMOIS'][36,2,xgrid,ygrid]-Fil.variables['SMOIS'][36,2,xgrid,ygrid]\ndSM4e=FilExp.variables['SMOIS'][36,3,xgrid,ygrid]-Fil.variables['SMOIS'][36,3,xgrid,ygrid]\ndUDROFF=FilExp.variables['UDROFF'][36,xgrid,ygrid]-Fil.variables['UDROFF'][36,xgrid,ygrid]\ndSFROFF=FilExp.variables['SFROFF'][36,xgrid,ygrid]-Fil.variables['SFROFF'][36,xgrid,ygrid]\ndPrec=FilExp.variables['RAINNC'][36,xgrid,ygrid]-Fil.variables['RAINNC'][36,xgrid,ygrid]\ndET=FilExp.variables['ACLHF'][36,xgrid,ygrid]-Fil.variables['ACLHF'][36,xgrid,ygrid]\nET_ctr=Fil.variables['ACLHF'][36,xgrid,ygrid]\n\nprint(\"===\\ndPrec (mm): \"+str(dPrec))\nprint(\"I = Irrgation amount / soil moisture diff at 0th hr (mm): \"+str((0.1*dSM1+0.3*dSM2+0.6*dSM3+1*dSM4)*1000))\nprint(\"dET (mm): \"+str(dET/28.94/86400))\nprint(\"dSM = Soil moisture diff at 36th hr (mm): \"+str((0.1*dSM1e+0.3*dSM2e+0.6*dSM3e+1*dSM4e)*1000))\nprint(\"dUDROFF (mm): \"+str(dUDROFF))\nprint(\"dSFROFF (mm): \"+str(dSFROFF))\nresidual=dPrec+(0.1*dSM1+0.3*dSM2+0.6*dSM3+1*dSM4)-dET/28.94/86400/1000-(0.1*dSM1e+0.3*dSM2e+0.6*dSM3e+1*dSM4e)-dUDROFF/1000-dSFROFF/1000#residual is in m\nprint(\"dPrec + I -dET - dSM- dUDROFF - dSFROFF = residual (mm): \"+str(residual*1000))\nprint(\"===\\nET_ctr (mm): \"+str(ET_ctr/28.94/86400))\n","repo_name":"loobahpng/wrf_plotting","sub_path":"ifWaterConserved.py","file_name":"ifWaterConserved.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11072226097","text":"import math,random,copy\nfrom model.graph import Graph\nimport copy\nimport numpy as np\n\nclass Chromosome_RK(object):\n def __init__(self,population,nodegene=-1,edgegene=-1):\n self.population = population\n self.num_nodes = population.getGraphSize()\n self.pageNumber = self.population.getPageNumber()\n self.num_edges = len(self.population.getEdgeList())\n\n if(nodegene == -1):\n self.nodegene = [random.random() for x in range(self.num_nodes)]\n else:\n self.nodegene = nodegene\n\n if type(edgegene)==int and edgegene != -1:\n self.edgegene = self.ibase(edgegene, self.pageNumber, self.num_edges)\n elif edgegene == -1:\n self.edgegene = []\n for i in range(self.num_edges):\n self.edgegene.append( random.randint(0,self.pageNumber-1) )\n else:\n self.edgegene = edgegene\n \n #if edgegene==-1:\n # self.edgegene = random.randint(0,self.pageNumber**self.num_edges-1)\n \n self._num_crossings = -1\n self.graph = None\n \n def recombine(self, other):\n newNodegene = self.recombineNodeGene(other)\n newEdgegene = self.recombineEdgeGene(other)\n return Chromosome_RK(self.population, newNodegene, newEdgegene)\n \n \n def recombineNodeGene(self, other):\n n = random.randint(0,len(self.nodegene)-1)\n if random.randint(0,1):\n newNodegene = self.nodegene[:n] + other.nodegene[n:]\n else:\n newNodegene = other.nodegene[:n] + self.nodegene[n:]\n return newNodegene\n \n def recombineEdgeGene(self, 
other):\n        n = random.randint(0,len(self.edgegene)-1)\n        if random.randint(0,1):\n            newEdgegene = self.edgegene[:n] + other.edgegene[n:] \n        else:\n            newEdgegene = other.edgegene[:n] + self.edgegene[n:]\n        return newEdgegene\n    \n    \n    def numCrossings(self):\n        if self._num_crossings == -1:\n            self._num_crossings = self.getGraph().numCrossings()\n        return self._num_crossings\n\n    def node_rk_generator(self):\n        indices = np.argsort(self.nodegene)\n        for ind in indices:\n            yield ind\n\n    def edge_generator(self):\n        num = self.num_edges\n        #pages = self.ibase(self.edgegene, self.pageNumber, self.num_edges)\n        for i,edge in enumerate(self.population.getEdgeList()):\n            yield (edge[0], edge[1], self.edgegene[i])\n\n    def getGraph(self):\n        if self.graph is None:\n            \n            self.graph = Graph()\n            self.graph.initFromLists(self.pageNumber, self.node_rk_generator(), self.edge_generator())\n        return self.graph\n    \n    \n#    def edge_generator(self):\n#        edges = self.population.getEdgeList()\n#        for edge in edges:\n#            yield edge\n    \n    def ibase(self, n, radix, minlen):\n        r = []\n        while n:\n            n,p = divmod(n, radix)\n            r.append(p)\n        r.reverse()\n        r = [0]*(minlen-len(r)) + r\n        return r\n    \n    def __lt__(self, other):\n        return self.numCrossings() < other.numCrossings()\n    \n    def __gt__(self, other):\n        return self.numCrossings() > other.numCrossings()","repo_name":"powertomato/heuOpt_2017_G1","sub_path":"solvers/evolution/Chromosome_RandKey.py","file_name":"Chromosome_RandKey.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71231035574","text":"# Create a list with the days of the week.\n# In one line (well, or as usual) create a dictionary of the form: {1: “Monday”, 2:...\n# Also, in one line or however it works out, create the inverse dictionary {“Monday”: 1,\n\nlist1 = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n\nnew_dict = {i+1: list1[i] for i in range(0, 7) }\nprint(new_dict)\n\nmy_dict = {}\n\nfor key in new_dict:\n    my_dict[new_dict[key]] = key\nprint(my_dict)\n\n\n# via dict comprehension\n# my_dict2 = {list1[i]: i+1 for i in range(0, 7) }\n# print(my_dict2)","repo_name":"ALekunovych/Beetroot-PY_LekunovychAnna","sub_path":"lesson 7/lesson 7 _ task 4.py","file_name":"lesson 7 _ task 4.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41483504437","text":"import socket\nimport time\nfrom i2c_connect import STM\nfrom wheel import Wheel\nimport threading\n\nHOST = '192.168.199.2'\nPORT1 = 62221\nTARG = '192.168.199.1'\nPORT2 = 62222\n\nstm = STM(0x69)\nwheel = Wheel()\ndef transfer_wheel():\n    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n        while True:\n            rotate = wheel.getRotate()\n            s.sendto(rotate.to_bytes(2, 'little', signed = True), (TARG, PORT1))\n\ndef transfer_pad():\n    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n        while True:\n            X = int(stm.getX())\n            Y = int(stm.getY())\n            s.sendto(X.to_bytes(2, 'little', signed = False), (TARG, PORT2))\n            s.sendto(Y.to_bytes(2, 'little', signed = False), (TARG, PORT2))\n\nthreads = []\nif __name__ == '__main__':\n    \n    threading.Thread(target = transfer_wheel).start()\n    threading.Thread(target = transfer_pad).start()\n    
\n","repo_name":"huroy5518/project_group_17_pi","sub_path":"socket_conn.py","file_name":"socket_conn.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34957144798","text":"import cv2\nimport numpy as np\n\n# Create a black image\nimg = np.zeros((512, 512, 3), np.uint8)\n\nimg = cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)\n\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"jacegem/OpenCV-Python-Tutorials","sub_path":"04. OpenCV의 그리기 함수/02_직사각형 그리기.py","file_name":"02_직사각형 그리기.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"43295386301","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom ..reconstruction import (Hologram, rebin_image, _find_peak_centroid,\n random_seed, _crop_image, CropEfficiencyWarning)\n\nimport numpy as np\nnp.random.seed(random_seed)\n\n\ndef _example_hologram(dim=2048):\n \"\"\"\n Generate example hologram.\n\n Parameters\n ----------\n dim : int\n Dimensions of image. Default is 2048.\n \"\"\"\n return 1000*np.ones((dim, dim)) + np.random.randn(dim, dim)\n\n\ndef test_load_hologram():\n holo = Hologram(_example_hologram())\n assert holo is not None\n\n\ndef test_rebin_image():\n dim = 2048\n full_res = _example_hologram(dim=dim)\n assert (dim//2, dim//2) == rebin_image(full_res, 2).shape\n\n\ndef _gaussian2d(amplitude, width, centroid, dim):\n x, y = np.mgrid[0:dim, 0:dim]\n x_centroid, y_centroid = centroid\n return amplitude*np.exp(-0.5 * ((x - x_centroid)**2/width**2 +\n (y - y_centroid)**2/width**2))\n\n\ndef test_centroid():\n centroid = (265, 435)\n test_image = _gaussian2d(amplitude=10, width=5, centroid=centroid, dim=1024)\n assert np.all(_find_peak_centroid(image=test_image) == centroid)\n assert np.all(test_image[centroid] == np.max(test_image))\n\n\ndef test_crop_image():\n # Even number rows/cols\n image1 = np.arange(1024).reshape((32, 32))\n new_shape1 = (image1.shape[0]//2, image1.shape[1]//2)\n cropped_image1 = _crop_image(image1, 0.5)\n assert new_shape1 == cropped_image1.shape\n\n # Odd number rows/cols\n image2 = np.arange(121).reshape((11, 11))\n new_shape2 = (image2.shape[0]//2, image2.shape[1]//2)\n cropped_image2 = _crop_image(image2, 0.5)\n assert new_shape2 == cropped_image2.shape\n\n\ndef test_multiple_reconstructions():\n \"\"\"\n At commit cc730bd and earlier, the Hologram.apodize function modified\n the Hologram.hologram array every time Hologram.reconstruct was called.\n This tests that that should not happen anymore.\n\n Also test that the caching machinery is working.\n \"\"\"\n\n propagation_distances = [0.5, 0.8]\n holo = Hologram(_example_hologram())\n h_raw = holo.hologram.copy()\n holograms = []\n\n for d in propagation_distances:\n w = holo.reconstruct(d, cache=True)\n holograms.append(holo.hologram)\n\n # check hologram doesn't get modified in place first time\n assert np.all(h_raw == holograms[0])\n\n # check hologram doesn't get modified again\n assert np.all(holograms[0] == holograms[1])\n\n # check that the cached reconstructions exist\n for d in propagation_distances:\n assert d in holo.reconstructions\n\n\ndef test_nonsquare_hologram():\n sq_holo = _example_hologram()\n nonsq_holo = sq_holo[:-10, :]\n\n holo = Hologram(nonsq_holo)\n w = holo.reconstruct(0.5)\n\n phase_shape = w.phase.shape\n\n assert phase_shape[0] == 
min(nonsq_holo.shape)\n    assert phase_shape[1] == min(nonsq_holo.shape)\n","repo_name":"bmorris3/shampoo","sub_path":"shampoo/tests/test_hologram.py","file_name":"test_hologram.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"21"}
{"seq_id":"11574803996","text":"from tkinter import *\r\nimport datetime\r\nimport time\r\nimport winsound\r\n\r\ndef alarm(set_alarm_timer):\r\n    # poll once per second until the current time matches the alarm time\r\n    while True:\r\n        time.sleep(1)\r\n        current_time = datetime.datetime.now()\r\n        now = current_time.strftime(\"%H:%M:%S\")\r\n        date = current_time.strftime(\"%d/%m/%Y\")\r\n        print(\"The date is:\", date)\r\n        print(\"The time is:\", now)\r\n        if now == set_alarm_timer:\r\n            print(\"Time to wake up!\")\r\n            winsound.PlaySound(\"sound.wav\", winsound.SND_ASYNC)\r\n            break\r\n\r\ndef actual_time():\r\n    set_alarm_timer = f\"{hour.get()}:{minute.get()}:{second.get()}\"\r\n    alarm(set_alarm_timer)\r\n\r\nclock = Tk()\r\nclock.title(\"Alarm Clock\")\r\nclock.geometry(\"300x200\")\r\ntime_format = Label(clock, text=\"Enter time in the 24-hour format!\", fg=\"green\", font=\"Arial\").place(x=36, y=120)\r\naddTime = Label(clock, text=\"Hr Min Sec\", font=(\"Calibri\", 13, \"normal\")).place(x=110)\r\nsetYourAlarm = Label(clock, text=\"Alarm Time:\", fg=\"blue\", font=(\"Helvetica\", 11, \"bold\")).place(x=15, y=29)\r\n\r\n# StringVars renamed so they no longer shadow the built-ins min() and, in spirit, sec\r\nhour = StringVar()\r\nminute = StringVar()\r\nsecond = StringVar()\r\n\r\nhourTime = Entry(clock, textvariable=hour, bg=\"pink\", width=15).place(x=110, y=30)\r\nminTime = Entry(clock, textvariable=minute, bg=\"pink\", width=15).place(x=150, y=30)\r\nsecTime = Entry(clock, textvariable=second, bg=\"pink\", width=15).place(x=200, y=30)\r\n\r\nsubmit = Button(clock, text=\"Set Alarm\", fg=\"red\", width=10, command=actual_time).place(x=110, y=70)\r\n\r\nclock.mainloop()\r\n","repo_name":"CodeMaster7000/Tkinter-Alarm-Clock","sub_path":"Tkinter Alarm Clock.py","file_name":"Tkinter Alarm Clock.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"9073641995","text":"import os\nimport logging\nimport tempfile\nimport itertools\n\nimport numpy as np\n\nfrom ConfigSpace.read_and_write import json as pcs_json\nfrom ConfigSpace.read_and_write import pcs_new\nfrom ConfigSpace.configuration_space import Configuration, ConfigurationSpace\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter\nfrom smac.tae.execute_ta_run import StatusType\nfrom smac.runhistory.runhistory import RunHistory\nfrom smac.optimizer.objective import average_cost\nfrom smac.scenario.scenario import Scenario\nfrom smac.stats.stats import Stats\nfrom smac.utils.io.output_writer import OutputWriter\nfrom smac.utils.io.traj_logging import TrajLogger, TrajEntry\n\nclass HpBandSter2SMAC(object):\n\n    def __init__(self):\n        self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)\n\n    def convert(self, folders, output_dir=None):\n        \"\"\"Convert hpbandster-results into smac-format, aggregating parallel runs along the budgets, so each is treated as\n        one run with the same budgets. 
Throws ValueError when budgets of individual runs dont match.\n\n Parameters\n ----------\n folders: List[str]\n list of runs to consider\n output_dir: str\n path to CAVE's output-directory\n\n Returns\n -------\n result: hpbandster.core.result\n BOHB-result in original format\n paths: List[str]\n paths to converted data\n budgets: List[int]\n budgets, corresponding to paths\n \"\"\"\n try:\n from hpbandster.core.result import Result as HPBResult\n from hpbandster.core.result import logged_results_to_HBS_result\n except ImportError as e:\n raise ImportError(\"To analyze BOHB-data, please install hpbandster (e.g. `pip install hpbandster`)\")\n\n folder2result = {f : logged_results_to_HBS_result(f) for f in folders}\n\n # backup_cs is a list with alternative interpretations of the configspace-file (if it's a .pcs-file)\n cs, backup_cs = self.load_configspace(folders[0])\n\n # Using temporary files for the intermediate smac-result-like format\n if not output_dir:\n self.logger.debug(\"New outputdir\")\n output_dir = tempfile.mkdtemp()\n budgets, paths = zip(*self.hpbandster2smac(folder2result, cs, backup_cs, output_dir).items())\n\n return list(folder2result.values())[0], paths, budgets\n\n def load_configspace(self, folder):\n \"\"\"Will try to load the configspace. If it's a pcs-file, backup_cs will be a list containing all possible\n combinations of interpretation for Categoricals. If this issue will be fixed, we can drop this procedure.\"\"\"\n cs_fn_json = os.path.join(folder, 'configspace.json')\n cs_fn_pcs = os.path.join(folder, 'configspace.pcs')\n if os.path.exists(cs_fn_json):\n with open(cs_fn_json, 'r') as fh:\n cs = pcs_json.read(fh.read())\n backup_cs = []\n self.logger.debug(\"Detected and loaded \\\"%s\\\". No backup-cs necessary\", cs_fn_json)\n elif os.path.exists(cs_fn_pcs):\n with open(cs_fn_pcs, 'r') as fh:\n cs = pcs_new.read(fh.readlines())\n # Create alternative interpretations\n categoricals = [hp for hp in cs.get_hyperparameters() if isinstance(hp, CategoricalHyperparameter)]\n non_categoricals = [hp for hp in cs.get_hyperparameters() if not isinstance(hp, CategoricalHyperparameter)]\n\n def _get_interpretations(choices):\n result = []\n if set(choices) == {\"True\", \"False\"}:\n result.append([True, False])\n if all([c.isdigit() for c in choices]):\n result.append([int(c) for c in choices])\n result.append(choices)\n return result\n\n choices_per_cat = [_get_interpretations(hp.choices) for hp in categoricals]\n combinations = itertools.product(*choices_per_cat)\n self.logger.debug(combinations)\n backup_cs = []\n for combi in combinations:\n bcs = ConfigurationSpace()\n for hp in non_categoricals:\n bcs.add_hyperparameter(hp)\n for name, choices in zip([hp.name for hp in categoricals], combi):\n bcs.add_hyperparameter(CategoricalHyperparameter(name, choices))\n bcs.add_conditions(cs.get_conditions())\n backup_cs.append(bcs)\n\n self.logger.debug(\"Sampled %d interpretations of \\\"%s\\\"\", len(backup_cs), cs_fn_pcs)\n self.logger.debug(choices_per_cat)\n else:\n raise ValueError(\"Missing pcs-file at '%s.[pcs|json]'!\" % os.path.join(folder, 'configspace'))\n return cs, backup_cs\n\n\n def _get_config(self, config_id, id2config, cs):\n config = Configuration(cs, id2config[config_id]['config'])\n try:\n model_based_pick = id2config[config_id]['config_info']['model_based_pick']\n config.origin = 'Model based pick' if model_based_pick else 'Random'\n except KeyError:\n self.logger.debug(\"No origin for config!\", exc_info=True)\n return config\n\n def 
hpbandster2smac(self, folder2result, cs: ConfigurationSpace, backup_cs, output_dir: str):\n \"\"\"Reading hpbandster-result-object and creating RunHistory and trajectory...\n treats each budget as an individual 'smac'-run, creates an\n output-directory with subdirectories for each budget.\n\n Parameters\n ----------\n folder2result: Dict(str : hpbandster.core.result.Result)\n folder mapping to bohb's result-objects\n cs: ConfigurationSpace\n the configuration space\n backup_cs: List[ConfigurationSpace]\n if loading a configuration fails, try configspaces from this list until succeed\n output_dir: str\n the output-dir to save the smac-runs to\n \"\"\"\n # Create runhistories (one per budget)\n budget2rh = {}\n for folder, result in folder2result.items():\n id2config_mapping = result.get_id2config_mapping()\n skipped = {'None' : 0, 'NaN' : 0}\n for run in result.get_all_runs():\n if not run.budget in budget2rh:\n budget2rh[run.budget] = RunHistory(average_cost)\n rh = budget2rh[run.budget]\n\n # Load config...\n try:\n config = self._get_config(run.config_id, id2config_mapping, cs)\n except ValueError as err:\n self.logger.debug(\"Loading configuration failed... trying alternatives\", exc_info=1)\n for bcs in backup_cs:\n try:\n config = self._get_config(run.config_id, id2config_mapping, bcs)\n cs = bcs\n break\n except ValueError:\n self.logger.debug(\"\", exc_info=1)\n pass\n else:\n self.logger.debug(\"None of the alternatives worked...\")\n raise ValueError(\"Your configspace seems to be corrupt. If you use floats (or mix up ints, bools and strings) as categoricals, \"\n \"please consider using the .json-format, as the .pcs-format cannot recover the type \"\n \"of categoricals. Otherwise please report this to \"\n \"https://github.com/automl/CAVE/issues (and attach the debug.log)\")\n\n if run.loss is None:\n skipped['None'] += 1\n continue\n if np.isnan(run.loss):\n skipped['NaN'] += 1\n continue\n\n rh.add(config=config,\n cost=run.loss,\n time=run.time_stamps['finished'] - run.time_stamps['started'],\n status=StatusType.SUCCESS,\n seed=0,\n additional_info={'info' : run.info, 'timestamps': run.time_stamps})\n\n self.logger.debug(\"Skipped %d None- and %d NaN-loss-values in BOHB-result\", skipped['None'], skipped['NaN'])\n\n # Write to disk\n budget2path = {} # paths to individual budgets\n self.logger.info(\"Assuming BOHB treats target algorithms as deterministic (and does not re-evaluate)\")\n for b, rh in budget2rh.items():\n output_path = os.path.join(output_dir, 'budget_' + str(b))\n budget2path[b] = output_path\n\n scenario = Scenario({'run_obj' : 'quality',\n 'cs' : cs,\n 'output_dir' : output_dir,\n 'deterministic' : True, # At the time of writing, BOHB is always treating ta's as deterministic\n })\n scenario.output_dir_for_this_run = output_path\n scenario.write()\n\n with open(os.path.join(output_path, 'configspace.json'), 'w') as fh:\n fh.write(pcs_json.write(cs))\n\n rh.save_json(fn=os.path.join(output_path, 'runhistory.json'))\n self.get_trajectory(folder2result, output_path, scenario, rh, budget=b)\n\n return budget2path\n\n def get_trajectory(self, folder2result, output_path, scenario, rh, budget=None):\n \"\"\"\n If budget is specified, get trajectory for only that budget. 
Else use hpbandster's averaging.\n If multiple results are specified, sort by times_finished and only add to combined trajectory if loss is better\n \"\"\"\n cs = scenario.cs\n\n if not output_path:\n output_path = tempfile.mkdtemp()\n\n traj_logger = TrajLogger(output_path, Stats(scenario))\n total_traj_dict = []\n for f, result in folder2result.items():\n if budget:\n traj_dict = self.get_incumbent_trajectory_for_budget(result, budget)\n else:\n traj_dict = result.get_incumbent_trajectory()\n\n id2config_mapping = result.get_id2config_mapping()\n\n for config_id, time, budget, loss in zip(traj_dict['config_ids'], traj_dict['times_finished'], traj_dict['budgets'], traj_dict['losses']):\n incumbent = self._get_config(config_id, id2config_mapping, cs)\n try:\n incumbent_id = rh.config_ids[incumbent]\n except KeyError as e:\n # This config was not evaluated on this budget, just skip it\n continue\n except:\n raise\n total_traj_dict.append({'config_id' : incumbent_id, 'time_finished' : time, 'budget' : budget, 'loss' : loss})\n\n last_loss = np.inf\n for element in sorted(total_traj_dict, key=lambda x: x['time_finished']):\n incumbent_id = element[\"config_id\"]\n incumbent = rh.ids_config[incumbent_id]\n time = element[\"time_finished\"]\n loss = element[\"loss\"]\n\n if loss > last_loss:\n continue\n\n ta_runs = -1\n ta_time_used = -1\n wallclock_time = time\n train_perf = loss\n # add\n traj_logger.trajectory.append({\"cpu_time\": ta_time_used,\n \"total_cpu_time\": None, # TODO: fix this\n \"wallclock_time\": wallclock_time,\n \"evaluations\": ta_runs,\n \"cost\": train_perf,\n \"incumbent\": incumbent\n })\n traj_logger._add_in_old_format(train_perf, incumbent_id, incumbent, ta_time_used, wallclock_time)\n traj_logger._add_in_aclib_format(train_perf, incumbent_id, incumbent, ta_time_used, wallclock_time)\n return traj_logger.trajectory\n\n def get_incumbent_trajectory_for_budget(self, result, budget):\n \"\"\"\n Returns the best configurations over time\n\n Parameters\n ----------\n budget: string\n TODO\n result: Result\n result object with runs\n\n Returns\n -------\n dict:\n dictionary with all the config IDs, the times the runs\n finished, their respective budgets, and corresponding losses\n \"\"\"\n all_runs = result.get_all_runs(only_largest_budget=False)\n\n #if not all_budgets:\n # all_runs = list(filter(lambda r: r.budget==res.HB_config['max_budget'], all_runs))\n\n all_runs.sort(key=lambda r: (r.budget, r.time_stamps['finished']))\n\n #self.logger.debug(\"all runs %s\", str(all_runs))\n\n return_dict = { 'config_ids' : [],\n 'times_finished': [],\n 'budgets' : [],\n 'losses' : [],\n }\n\n current_incumbent = float('inf')\n incumbent_budget = result.HB_config['min_budget']\n\n for r in all_runs:\n if r.loss is None: continue\n if r.budget != budget: continue\n\n new_incumbent = False\n\n if r.loss < current_incumbent:\n new_incumbent = True\n\n if new_incumbent:\n current_incumbent = r.loss\n\n return_dict['config_ids'].append(r.config_id)\n return_dict['times_finished'].append(r.time_stamps['finished'])\n return_dict['budgets'].append(r.budget)\n return_dict['losses'].append(r.loss)\n\n if current_incumbent != r.loss:\n r = all_runs[-1]\n\n return_dict['config_ids'].append(return_dict['config_ids'][-1])\n return_dict['times_finished'].append(r.time_stamps['finished'])\n return_dict['budgets'].append(return_dict['budgets'][-1])\n return_dict['losses'].append(return_dict['losses'][-1])\n\n\n return 
(return_dict)\n","repo_name":"timothyyu/ml_monorepo","sub_path":"CAVE/cave/utils/hpbandster2smac.py","file_name":"hpbandster2smac.py","file_ext":"py","file_size_in_byte":14141,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"}
{"seq_id":"2226728597","text":"from django.urls import include, path\nfrom . import views\n\nurlpatterns = [\n\n    path('', views.welcome),\n    path('api/hawa/', views.AirQualityIndexAPI.as_view(), name='Air Quality Index API'),\n    path('message/new/', views.message_new, name='message_new'),\n    path('aqi/', views.aqi_detail, name='aqi_detail'),\n]\n","repo_name":"hawa-ko-reporter/api","sub_path":"project/subscriptions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"45201111335","text":"name = 'zack'\ndevices = ['laptop', 'smartphone', 'tablet']\n\ndef display(arg):\n    print(f'arg = {arg}')\n    # print('arg= ' + arg)\n\n# importing this file as a module by its own name (note: when run as a script,\n# this executes the file a second time under the module name 'person')\nimport person\nprint(person.name)\n\n# if you are using Google Colaboratory, you will find a similar result\nimport sys\nprint(sys.path)","repo_name":"apeloeza/ocbc-batch-3-python","sub_path":"sesi-3/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5577107454","text":"#!/usr/bin/env python\n\n\"\"\"\ncpu.py: test iperf bandwidth for varying cpu limits\n\nSince we are limiting the hosts (only), we should expect the iperf\nprocesses to be affected, as well as any system processing which is\nbilled to the hosts.\n\nWe reserve >50% of cycles for system processing; we assume that\nthis is enough for it not to affect results. Hosts are limited to\n40% of total cycles, which we assume is enough to make them CPU\nbound.\n\nAs CPU performance increases over time, we may have to reduce the\noverall CPU allocation so that the host processing is still CPU bound.\nThis is perhaps an argument for specifying performance in a more\nsystem-independent manner.\n\nIt would also be nice to have a better handle on limiting packet\nprocessing cycles. It's not entirely clear to me how those are\nbilled to user or system processes if we are using OVS with a kernel\ndatapath. 
With a user datapath, they are easier to account for, but\noverall performance is usually lower.\n\nAlthough the iperf client uses more CPU and should be CPU bound (?),\nwe measure the received data at the server since the client transmit\nrate includes buffering.\n\"\"\"\n\nfrom mininet.net import Mininet\nfrom mininet.node import CPULimitedHost\nfrom mininet.topolib import TreeTopo\nfrom mininet.util import custom, waitListening, decode\nfrom mininet.log import setLogLevel, info\nfrom mininet.clean import cleanup\n\ndef bwtest( cpuLimits, period_us=100000, seconds=10 ):\n \"\"\"Example/test of link and CPU bandwidth limits\n cpu: cpu limit as fraction of overall CPU time\"\"\"\n\n topo = TreeTopo( depth=1, fanout=2 )\n\n results = {}\n\n for sched in 'rt', 'cfs':\n info( '*** Testing with', sched, 'bandwidth limiting\\n' )\n for cpu in cpuLimits:\n # cpu is the cpu fraction for all hosts, so we divide\n # it across two hosts\n host = custom( CPULimitedHost, sched=sched,\n period_us=period_us,\n cpu=.5*cpu )\n try:\n net = Mininet( topo=topo, host=host, waitConnected=True )\n # pylint: disable=bare-except\n except: # noqa\n info( '*** Skipping scheduler %s and cleaning up\\n' % sched )\n cleanup()\n break\n net.start()\n net.pingAll()\n hosts = [ net.getNodeByName( h ) for h in topo.hosts() ]\n client, server = hosts[ 0 ], hosts[ -1 ]\n info( '*** Starting iperf with %d%% of CPU allocated to hosts\\n' %\n ( 100.0 * cpu ) )\n # We measure at the server because it doesn't include\n # the client's buffer fill rate\n popen = server.popen( 'iperf -yc -s -p 5001' )\n waitListening( client, server, 5001 )\n client.cmd( 'iperf -yc -t %s -c %s' % ( seconds, server.IP() ) )\n # ignore empty result from waitListening/telnet for old iperf\n svals = {}\n while not svals or int( svals[ 'rate' ] ) == 0:\n line = decode( popen.stdout.readline() )\n # Probably shouldn't depend on an internal method, but\n # this is the easiest way\n svals = Mininet._iperfVals( # pylint: disable=protected-access\n line, server.IP() )\n bps = float( svals[ 'rate' ] )\n popen.terminate()\n net.stop()\n updated = results.get( sched, [] )\n updated += [ ( cpu, bps ) ]\n results[ sched ] = updated\n\n return results\n\n\ndef dump( results ):\n \"Dump results\"\n\n fmt = '%s\\t%s\\t%s\\n'\n\n info( '\\n' )\n info( fmt % ( 'sched', 'cpu', 'received bits/sec' ) )\n\n for sched in sorted( results.keys() ):\n entries = results[ sched ]\n for cpu, bps in entries:\n pct = '%d%%' % ( cpu * 100 )\n mbps = '%.2e' % bps\n info( fmt % ( sched, pct, mbps ) )\n\n\nif __name__ == '__main__':\n setLogLevel( 'info' )\n # These are the limits for the hosts/iperfs - the\n # rest is for system processes\n limits = [ .5, .4, .3, .2, .1 ]\n out = bwtest( limits )\n dump( out )\n","repo_name":"mininet/mininet","sub_path":"examples/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":4978,"dataset":"github-code","pt":"21"} +{"seq_id":"41923219906","text":"import numpy as np\nimport cv2\n\ndef pyramid():\n img = cv2.imread('E:/Python_Study/OpenCV/images/model.jpg', cv2.IMREAD_GRAYSCALE)\n tmp = img.copy()\n\n win_titles = ['Level 1', 'Level 2', 'Level 3']\n g_down = []\n g_up = []\n\n g_down.append(tmp)\n\n for i in range(3):\n tmp1 = cv2.pyrDown(tmp)\n g_down.append(tmp1)\n tmp = tmp1\n \n cv2.imshow('Org', tmp)\n\n for i in range(3):\n tmp1 = cv2.pyrUp(tmp)\n g_up.append(tmp1)\n tmp = tmp1\n \n for i in range(3):\n cv2.imshow(win_titles[i], g_up[i])\n \n cv2.waitKey(0)\n 
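# Note (assumption about intent, not in the original): pyrUp cannot invert pyrDown --\n    # each downsample discards high-frequency detail, so the 'Level' windows show\n    # progressively blurrier upsampled reconstructions, not the original image.\n    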
cv2.destroyAllWindows()\n\npyramid()","repo_name":"yongil1222/Python_Study","sub_path":"OpenCV/Ex16-2.ImagePyramid2.py","file_name":"Ex16-2.ImagePyramid2.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73256172532","text":"'''\nQuestion link [Project Euler Problem 2](https://www.hackerrank.com/contests/projecteuler/challenges/euler002/problem?isFullScreen=true)\n'''\n# f = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]\n# n = 100\n# sum = 2 + 8 + 34 = 44\n#!/bin/python3\n\nimport sys\n\n\ndef solution(n):\n    # sum the even Fibonacci terms whose values do not exceed n\n    a = 1\n    b = 2\n    s = b\n    while True:\n        c = a + b\n        if c > n:\n            break\n        # the break above guarantees c <= n here, so even terms equal to n are counted too\n        if c % 2 == 0:\n            s += c\n        a = b\n        b = c\n    return s\n\n\nt = int(input().strip())\nfor a0 in range(t):\n    n = int(input().strip())\n    print(solution(n))\n","repo_name":"shubhaseesh/DSA","sub_path":"project-euler/problem_2_solution.py","file_name":"problem_2_solution.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"10464495916","text":"import cv2\nimport matplotlib.pyplot as plt\nimport imutils\nimport math\n\n# INSTRUCTIONS:\n# open the library and see which image you want to test it on\n# do not include \".png\"\nname = input('what is the name of the image? \\n')\noriginal_img = cv2.imread('./library/' + name + '.png')\n\n# convert the input image to grayscale\ngray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)\n\n# apply thresholding to convert grayscale to binary image\nret, thresh = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)\n\n# convert BGR to RGB to display using matplotlib\nimgRGB = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)\nthresh = cv2.erode(thresh, None, iterations=2)\nthresh = cv2.dilate(thresh, None, iterations=2)\n\n# display Original and Binary Images\nplt.subplot(131), plt.imshow(imgRGB, cmap='gray'), plt.title('Original Image'), plt.axis('off')\nplt.subplot(132), plt.imshow(thresh, cmap='gray'), plt.title('Binary Image'), plt.axis('off')\nplt.show()\n\n# blur the image\nthresh = cv2.blur(thresh, (10, 10))\n\n# find contours in thresholded image, then grab the largest one\ncnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n\tcv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\nc = max(cnts, key=cv2.contourArea)\n\n# compute the topmost point of the contour, it will be used to locate the hand\nextLeft = tuple(c[c[:, :, 0].argmin()][0])\nextTop = tuple(c[c[:, :, 1].argmin()][0])\ncX = extTop[0]\ncY = extTop[1]\nheight_divided_3 = imgRGB.shape[0] / 3\nwidth_divided_3 = imgRGB.shape[1] / 3\n\n# compute the location of the contour (row index uses height, column index uses width)\nlocation = ''\nif int(cY//height_divided_3) == 1 and int(cX//width_divided_3) == 1:\n    location = 'center'\nelif int(cY//height_divided_3) == 0 and int(cX//width_divided_3) == 0:\n    location = 'upper left'\nelif int(cY//height_divided_3) == 0 and int(cX//width_divided_3) == 2:\n    location = 'upper right'\nelif int(cY//height_divided_3) == 2 and int(cX//width_divided_3) == 0:\n    location = 'bottom left'\nelif int(cY//height_divided_3) == 2 and int(cX//width_divided_3) == 2:\n    location = 'bottom right'\nelse:\n    location = 'unknown'\n\n# identify if it is a fist or a splay by finding the defects in convex hull with respect to hand\nhull = cv2.convexHull(c, returnPoints=False)\ndefects = cv2.convexityDefects(c, hull)\nn = 0\nfor i in range(defects.shape[0]):\n    s, e, f, d = defects[i, 0]\n    start = tuple(c[s][0])\n    end = tuple(c[e][0])\n    far = 
tuple(c[f][0])\n \n # find the length of all sides of triangle\n x = math.dist(end, start)\n y = math.dist(far, start)\n z = math.dist(end, far)\n s = (x+y+z)/2\n ar = math.sqrt(s*(s-x)*(s-y)*(s-z))\n \n #distance between point and convex hull\n d=(2*ar)/x\n \n # apply cosine rule\n angle = math.acos((y**2 + z**2 - x**2)/(2*y*z)) * 57\n\n # ignore angles > 90 and ignore points very close to convex hull\n # this will clean up the points not affected by the hand \n if angle <= 90 and d>40:\n n += 1\n #cv2.circle(thresh, far, 5, (255,0,0), 5)\nhand = '' \nif n == 4:\n hand = 'splay'\nelif n == 0:\n #print(math.dist(extLeft, extTop))\n if math.dist(extLeft, extTop) > 200:\n hand = 'palm'\n else:\n hand = 'fist'\nelse:\n hand = 'unrecognized'\nif hand == 'palm':\n cv2.putText(thresh, hand, (int(width_divided_3), int(height_divided_3)), cv2.FONT_HERSHEY_SIMPLEX, 1, (105,105,105), 2)\nelse:\n cv2.putText(thresh, location + ', ' + hand, (int(width_divided_3), int(height_divided_3)), cv2.FONT_HERSHEY_SIMPLEX, 1, (105,105,105), 2)\n\n# show the output image\ncv2.imshow(\"Image\", thresh)\ncv2.waitKey(0)","repo_name":"daisyye0730/Gesture-Detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32345835367","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Welcome to the interactive page !\n# \n# Here you can interact with the different visualizations using direct manipulations or using widgets.\n# \n# If you find a \"Click to show\" button, then you can click to have the Python code of the cell.\n\n# Below you can directly interact with the plot !\n\n# In[1]:\n\n\n# import library \nimport pandas as pd \nimport matplotlib.pyplot as plt \nfrom myst_nb import glue\nimport plotly.express as px\n\n#Upload data\ndata_cols = [\"Number\",\"Recommendation\",\"Audit Question\",\"Weight\",\"Level\",\"Description\",\"Grade\"]\ndatas = pd.read_csv(\"data/manufactoring_on_board.csv\", header=0, names=data_cols, na_filter=False)\n\n#Plot an interactive graph\nfigX = px.scatter(datas, x=\"Weight\", y=\"Grade\", color=\"Level\")\nfigX\n\n\n# In[3]:\n\n\n#Create first widget with ipywidgets\nimport ipywidgets as widgets\nwidgets.IntSlider(\n value=7,\n min=0,\n max=10,\n step=1,\n description='Test:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='d'\n)\n\n\n# Below you can select a range on the slider t only see the questions whose grade stands between the two extreme values (level widget has no effect for now)\n\n# In[4]:\n\n\n#Defines the min and max of the grade widget according to data\nmin_grade = int(datas.Grade.min())\nmax_grade = int(datas.Grade.max())\n#print(min_grade, max_grade)\n\n\n# In[5]:\n\n\n#Importations\nimport pandas as pd\nimport numpy as np\n\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport plotly\n\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nfrom ipywidgets import GridspecLayout\nimport ipywidgets as widgets\n\n#Defines the different widgets for grade and level (level has no effect for the moment)\n\ngrade_from = widgets.Dropdown(\n options=np.arange(min_grade,max_grade+1),\n value=min_grade,\n description='Grade From:',\n disabled=False,\n width='100px'\n)\n\ngrade_to = widgets.Dropdown(\n options=np.arange(min_grade,max_grade+1),\n value=max_grade,\n 
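# the 'to' bound defaults to the highest grade present in the data (assumption:\n    # grades are integer-valued, as implied by int(datas.Grade.min()) above)\n    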
description='Grade To:',\n disabled=False,\n width='100px'\n)\n\nlevel = widgets.Dropdown(\n options=[1,2,3,4],\n value=1,\n description='Level:',\n disabled=False,\n max_width='100px'\n)\n\n# Create containers\ncontainer_grade = widgets.VBox([widgets.HTML(value=\"Grade\"),grade_from, grade_to],justify_content='center',max_width='180px')\ncontainer_level = widgets.VBox([widgets.HTML(value=\"Level\"),level],justify_content='center',max_width='180px')\n\n# Create grids\ngrid = GridspecLayout(1, 2, width='900px', height='150px',grid_gap=\"10px\")\ngrid[0,0] = container_grade\ngrid[0,1] = container_level\n\n\n#Define the type of plot we want and all the parameters\nquestion_data = []\nfor index, row in datas.iterrows():\n # Each car is a marker on the scatter plot. \n dt_g = go.Scatter(\n x = (row['Weight'],),\n y = (row['Grade'],),\n mode='markers',\n marker=dict(\n size=16\n ),\n visible=True,\n hovertext = \"Weight : \" +str(row['Weight'])+\": (Level=\"+str(row['Level'])+\", Grade=\"+str(row['Grade'])+\")\"\n )\n question_data.append(dt_g)\n \n#Defines the layout of the plot\nlayout = dict(\n title='Grade and Level according to Weight',\n autosize=True,\n hovermode='closest',\n showlegend=False,\n xaxis_title=\"Weight\",\n yaxis_title=\"Grade\",\n )\n\n# Create figure\nfig = go.FigureWidget(data=question_data, layout=layout)\n\n\n# Function called whenever there is any change in any of the widgets to update the plot\n#Notice : the graph is not really updated but we hide some values with a filter\ndef response(change):\n \n # Store indices of the cars within the price range\n grade_filter = list(np.where((datas.Grade > grade_from.value) & (datas.Grade < grade_to.value))[0])\n # Store indices of the brand\n if(level.value=='all'):\n # Store all the indices\n level_filter = range(len(question_data))\n else:\n # Store indices of those cars which belong to the filtered brand\n level_filter = list(np.where(datas.Grade == level.value)[0])\n \n \n # Store indices of the intersection of all the filters. 
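Only the grade filter\n    # actually feeds the intersection for now; level_filter is computed above but\n    # deliberately unused, matching the note that the level widget has no effect yet.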
\n all_filters =list(set(grade_filter))\n #print(all_filters)\n # Set visibility for those cars at the intersection to true and rest to false\n visibility = [True if i in all_filters else False for i in range(len(question_data))]\n \n # Update visibility of the plot so only the ones filtered will be true and rest will be false\n fig.plotly_restyle({'visible': visibility})\n \n# Change Listener for widgets\ngrade_from.observe(response, names=\"value\")\ngrade_to.observe(response, names=\"value\")\nlevel.observe(response, names=\"value\")\n\n# Plot the widgets\nwidgets.VBox([grid, fig])\n\n\n# In[6]:\n\n\nfrom notebookjs import execute_js\n\n\n# In[7]:\n\n\nwith open(\"notebookJSfiles/draw_circle_lib.js\", \"r\") as f:\n draw_circle_lib = f.read()\n \nwith open(\"notebookJSfiles/d3.v3.min.js\", \"r\") as f:\n d3_lib = f.read()\n\nexecute_js([d3_lib, draw_circle_lib], \"draw_circle\", {\"color\": \"#4682B4\"})\n\n\n# In[8]:\n\n\nwith open(\"notebookJSfiles/radial_bar.css\", \"r\") as f:\n radial_bar_css = f.read()\n \nwith open (\"notebookJSfiles/radial_bar_lib.js\", \"r\") as f:\n radial_bar_lib = f.read()\n \nenergy = pd.read_csv(\"notebookJSfiles/energy.csv\")\n\nexecute_js(library_list=[d3_lib, radial_bar_lib], main_function=\"radial_bar\", \n data_dict=energy.to_dict(orient=\"records\"), css_list=[radial_bar_css])\n\n\n# In[9]:\n\n\nhelloworld_js = \"\"\"\nfunction helloworld(div_id, data){\n comm = new CommAPI(\"get_hello\", (ret) => {\n document.querySelector(div_id).textContent = ret.text;\n });\n setInterval(() => {comm.call({})}, 1000);\n comm.call({});\n}\n\"\"\"\n\nimport random\ndef hello_world_random(data):\n hello_world_languages = [\n \"Ola Mundo\", # Portuguese\n \"Hello World\", # English\n \"Hola Mundo\", # Spanish\n \"Geiá sou Kósme\", # Greek\n \"Kon'nichiwa sekai\", # Japanese\n \"Hallo Welt\", # German\n \"namaste duniya\" #Hindi\n ]\n i = random.randint(0, len(hello_world_languages)-1)\n return {'text': hello_world_languages[i]}\n\n#execute_js(helloworld_js, \"helloworld\", callbacks={\"get_hello\": hello_world_random})\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"csxADS/nrpmHandbook","sub_path":"_build/jupyter_execute/folderMiscellaneous/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73036497333","text":"from setuptools import find_packages\nfrom setuptools import setup\nimport sys\n\ninstall_requires = [\n 'atari_py',\n 'cached-property',\n 'chainer>=1.20.0.1',\n 'future',\n 'gym>=0.7.3',\n 'numpy>=1.10.4',\n 'pillow',\n 'scipy',\n]\n\ntest_requires = [\n 'nose',\n]\n\nif sys.version_info < (3, 2):\n install_requires.append('fastcache')\n\nif sys.version_info < (3, 4):\n install_requires.append('statistics')\n\nif sys.version_info < (3, 5):\n install_requires.append('funcsigs')\n\nsetup(name='chainerrl',\n version='0.0.1',\n description='ChainerRL, a deep reinforcement learning library',\n author='Yasuhiro Fujita',\n author_email='fujita@preferred.jp',\n license='MIT License',\n packages=find_packages(),\n install_requires=install_requires,\n test_requires=test_requires)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/pfnet_chainerrl/chainerrl-master/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"27014736963","text":"# Testea la conexión al web service de Microglobal\n\nimport 
argparse\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\npath2root = os.path.join(os.path.dirname(__file__), \"..\")\nsys.path.append(path2root)\n\nfrom precios_mg_helpers import mg_get_brands_xml\nfrom notificaciones.notificador import StatusNotifier\n\nif __name__ == \"__main__\":\n\n    channels = [\"TelegramObserver\", \"ConsoleObserver\", \\\n                {\"NotionObserver\": {\"notion_script\": \"Web Service MG\"}}]\n\n    # Configures Notifier and Observers\n    mg_status_notifier = StatusNotifier()\n    mg_status_notifier.attach_all(channels)\n\n    # Test the connection to the Microglobal web service\n    if mg_get_brands_xml():\n        message = 'WS MG up'\n        mg_status_notifier.notify_up(message=message)\n    else:\n        message = ' 🔴 🔴 Warning! The MG web service is NOT RESPONDING!'\n        mg_status_notifier.notify_down(message=message)\n","repo_name":"moregain909/inea_produccion","sub_path":"precios_mg/mg_watchdog.py","file_name":"mg_watchdog.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3267155950","text":"# The first non-repeating number coding challenge is one of the easiest on LeetCode (correct me if I'm wrong)\n\n# The brute-force solution below runs in O(n^2) time and O(1) extra space; an O(n)-time\n# version would need a hash map, which costs O(n) extra space\n\n# Okay, let's start coding\n\nclass solution():\n    def first_non_repeating(self, x, n):\n        for i in range(n):\n            j=0\n            while j= 1]\n\nlines = [line.split() for line in lines]\n\nfrom statistics import mean, median  # needed by gen_rt_table and poisson_compare below\n\n\nresults = {'p': {}, 'c': {}, 'i': {}}\nit = iter(lines)\n\nfor line in it:\n    static = line[0] == 's'\n    dist = line[1]\n    load = float(line[2])\n    alg = line[3]\n\n    if static:\n        alg = ('s', int(alg))\n\n    if alg not in results[dist]:\n        results[dist][alg] = {}\n\n    if load not in results[dist][alg]:\n        results[dist][alg][load] = {'rt': [], 'iv': []}\n\n    # 'p'-distributed runs are followed by 100 interval values before the response time\n    if dist == 'p':\n        ls = []\n        for i in range(100):\n            ls.append(float(next(it)[0]))\n        results[dist][alg][load]['iv'].append(ls)\n\n    response_time = float(next(it)[0])\n    next(it)\n\n    results[dist][alg][load]['rt'].append(response_time)\n\n\ndef list_algs(results):\n    algs = set()\n    for alg in results:\n        algs.add(alg)\n    return algs\n\n\ndef list_loads(results):\n    loads = set()\n    for item in results.values():\n        for load in item:\n            loads.add(load)\n    return loads\n\n\ndef gen_rt_table(dist, results, op=median):\n    # 'dist' (renamed from 'type', which shadowed the builtin) selects the distribution\n    results = results[dist]\n    algs = list_algs(results)\n    loads = list_loads(results)\n    static_algs = [alg[1] for alg in algs if isinstance(alg, tuple)]\n    static_algs.sort()\n    dyn_algs = [alg for alg in algs if not isinstance(alg, tuple)]\n    dyn_algs.sort()\n\n    table = 'Load '\n    for alg in static_algs:\n        table += str(alg) + ' '\n    for alg in dyn_algs:\n        table += alg + ' '\n    table += '\\n'\n    for load in loads:\n        table += str(load) + ' '\n        for alg in static_algs:\n            try:\n                table += str(op(results[('s', alg)][load]['rt'])) + ' '\n            except KeyError:\n                table += '* '\n        for alg in dyn_algs:\n            try:\n                table += str(op(results[alg][load]['rt'])) + ' '\n            except KeyError:\n                table += '* '\n        table += '\\n'\n    return table\n\n\n# results[dist][alg][load]['iv']\ndef poisson_compare(results, a1, a2, load):\n    def join(ls1, ls2):\n        return mean([e1 / e2 for e1, e2 in zip(ls1, ls2)])\n\n    lss1 = results['p'][a1][load]['iv']\n    lss2 = results['p'][a2][load]['iv']\n\n    return median([join(ls1, ls2) for ls1, ls2 in zip(lss1, lss2)])\n\n\nprint(gen_rt_table('c', results))\nprint()\nprint(gen_rt_table('p', 
results))\n\n","repo_name":"rafaelms101/distributed","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2290441337","text":"import copy\nimport os\nfrom typing import Union\n\nfrom transformers import CONFIG_MAPPING, PretrainedConfig\nfrom transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\nfrom transformers.utils import logging\n\nlogger = logging.get_logger(__name__)\n\n# TODO: make it configurable\ndef get_config_cls(model_type: str) -> PretrainedConfig:\n if model_type == \"blip_2_vision_model\":\n from transformers import Blip2VisionConfig\n return Blip2VisionConfig\n elif model_type == \"clip_vision_model\":\n from transformers import CLIPVisionConfig\n return CLIPVisionConfig\n elif model_type == \"blip_2_qformer\":\n from transformers import Blip2QFormerConfig\n return Blip2QFormerConfig\n elif model_type == \"openflamingo_perceiver_resampler\":\n ...\n else:\n return CONFIG_MAPPING[model_type]\n\n\nclass OpenFlamingoPerceiverResamplerConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`OpenFlamingoPerceiverResamplerModel`].\n It is used to instantiate a OpenFlamingo Perceiver Resampler model according to the specified arguments,\n defining the model architecture. Instantiating a configuration with the defaults will yield a similar\n configuration to that of the OpenFlamingo [vivym/openflamingo-9b](https://huggingface.co/vivym/openflamingo-9b)\n architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.\n Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `Callable`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (`int`, *optional*, defaults to 512):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n position_embedding_type (`str`, *optional*, defaults to `\"absolute\"`):\n Type of position embedding. Choose one of `\"absolute\"`, `\"relative_key\"`, `\"relative_key_query\"`. 
For\n positional embeddings use `\"absolute\"`. For more information on `\"relative_key\"`, please refer to\n [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).\n For more information on `\"relative_key_query\"`, please refer to *Method 4* in [Improve Transformer Models\n with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).\n encoder_hidden_size (`int`, *optional*, defaults to 1408):\n The hidden size of the hidden states for cross-attention.\n \"\"\"\n\n model_type = \"openflamingo_perceiver_resampler\"\n\n def __init__(\n self,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n pad_token_id=0,\n position_embedding_type=\"absolute\",\n encoder_hidden_size=1408,\n **kwargs,\n ):\n super().__init__(pad_token_id=pad_token_id, **kwargs)\n\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.position_embedding_type = position_embedding_type\n self.encoder_hidden_size = encoder_hidden_size\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the qformer config dict if we are loading from OpenFlamingoConfig\n if config_dict.get(\"model_type\") == \"openflamingo\":\n config_dict = config_dict[\"resampler_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)\n\n\nclass OpenFlamingoConfig(PretrainedConfig):\n r\"\"\"\n [`OpenFlamingoConfig`] is the configuration class to store the configuration of a [`OpenFlamingoForConditionalGeneration`].\n It is used to instantiate a OpenFlamingo model according to the specified arguments, defining the vision model,\n Perceiver Resampler model and language model configs. Instantiating a configuration with the defaults will yield a similar\n configuration to that of the OpenFlamingo [vivym/openflamingo-9b](https://huggingface.co/vivym/openflamingo-9b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`OpenFlamingoVisionConfig`].\n qformer_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`OpenFlamingoQFormerConfig`].\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize any [`PretrainedConfig`].\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import (\n ... OpenFlamingoVisionConfig,\n ... OpenFlamingoQFormerConfig,\n ... OPTConfig,\n ... OpenFlamingoConfig,\n ... OpenFlamingoForConditionalGeneration,\n ... )\n\n >>> # Initializing a OpenFlamingoConfig with vivym/openflamingo-9b style configuration\n >>> configuration = OpenFlamingoConfig()\n\n >>> # Initializing a OpenFlamingoForConditionalGeneration (with random weights) from the vivym/openflamingo-9b style configuration\n >>> model = OpenFlamingoForConditionalGeneration(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a OpenFlamingoConfig from a OpenFlamingoVisionConfig, OpenFlamingoQFormerConfig and any PretrainedConfig\n\n >>> # Initializing OpenFlamingo vision, OpenFlamingo Q-Former and language model configurations\n >>> vision_config = OpenFlamingoVisionConfig()\n >>> qformer_config = OpenFlamingoQFormerConfig()\n >>> text_config = OPTConfig()\n\n >>> config = OpenFlamingoConfig.from_text_vision_configs(vision_config, qformer_config, text_config)\n ```\"\"\"\n\n model_type = \"openflamingo\"\n is_composition = True\n\n def __init__(self, vision_config=None, resampler_config=None, text_config=None, **kwargs):\n super().__init__(**kwargs)\n\n if vision_config is None:\n vision_config = {}\n logger.info(\"vision_config is None. initializing the text config with default values (`CLIPVisionConfig`).\")\n\n if resampler_config is None:\n resampler_config = {}\n logger.info(\"resampler_config is None. Initializing the OpenFlamingoQFormerConfig with default values.\")\n\n if text_config is None:\n text_config = {}\n logger.info(\"text_config is None. 
Initializing the text config with default values (`LlamaConfig`).\")\n\n        vision_model_type = vision_config[\"model_type\"] if \"model_type\" in vision_config else \"clip_vision_model\"\n        self.vision_config = get_config_cls(vision_model_type)(**vision_config)\n        self.resampler_config = OpenFlamingoPerceiverResamplerConfig(**resampler_config)\n        text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"llama\"\n        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n        self.tie_word_embeddings = self.text_config.tie_word_embeddings\n        self.is_encoder_decoder = self.text_config.is_encoder_decoder\n\n        # self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size\n        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n        self.initializer_factor = 1.0\n        self.initializer_range = 0.02\n\n    @classmethod\n    def from_vision_resampler_text_configs(\n        cls,\n        vision_config: PretrainedConfig,\n        resampler_config: OpenFlamingoPerceiverResamplerConfig,\n        text_config: PretrainedConfig,\n        **kwargs,\n    ):\n        r\"\"\"\n        Instantiate a [`OpenFlamingoConfig`] (or a derived class) from a OpenFlamingo vision model, resampler and language model\n        configurations.\n\n        Returns:\n            [`OpenFlamingoConfig`]: An instance of a configuration object\n        \"\"\"\n\n        return cls(\n            vision_config=vision_config.to_dict(),\n            resampler_config=resampler_config.to_dict(),\n            text_config=text_config.to_dict(),\n            **kwargs,\n        )\n\n    def to_dict(self):\n        \"\"\"\n        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].\n\n        Returns:\n            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.\n        \"\"\"\n        output = copy.deepcopy(self.__dict__)\n        output[\"vision_config\"] = self.vision_config.to_dict()\n        output[\"resampler_config\"] = self.resampler_config.to_dict()\n        output[\"text_config\"] = self.text_config.to_dict()\n        output[\"model_type\"] = self.__class__.model_type\n        return output\n","repo_name":"omni-gpt/OmniGPT4","sub_path":"omnigpt4/models/openflamingo/configuration_openflamingo.py","file_name":"configuration_openflamingo.py","file_ext":"py","file_size_in_byte":11518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5033420713","text":"from flask import jsonify\n\n\ndef populate_obj(obj, data_dictionary):\n    fields = data_dictionary.keys()\n\n    for field in fields:\n        # print(field)\n        try:\n            getattr(obj, field)\n            setattr(obj, field, data_dictionary[field])\n        except AttributeError:\n            return jsonify({'ERROR': f'Record has no attribute: {field}'}), 400\n","repo_name":"Jamesh431/Cyclone","sub_path":"util/reflection.py","file_name":"reflection.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15987377869","text":"# http://www.lexev.org/2015/trying-json-combo-django-and-postgresql/\n\n# A suitable example!!!!!!!!\n# https://www.cybertec-postgresql.com/en/json-postgresql-how-to-use-it-right/#json-good-example\n# https://ru.stackoverflow.com/questions/1222063/%D0%9A%D0%B0%D0%BA-%D0%B4%D0%BE%D0%B1%D0%B0%D0%B2%D0%BB%D1%8F%D1%82%D1%8C-%D1%85%D0%B0%D1%80%D0%B0%D0%BA%D1%82%D0%B5%D1%80%D0%B8%D1%81%D1%82%D0%B8%D0%BA%D0%B8-%D1%82%D0%BE%D0%B2%D0%B0%D1%80%D0%BE%D0%B2-%D1%87%D0%B5%D1%80%D0%B5%D0%B7-json-%D0%B4%D0%BB%D1%8F-%D0%BA%D0%B0%D0%B6%D0%B4%D0%BE%D0%B9-%D0%BA%D0%B0%D1%82%D0%B5%D0%B3%D0%BE%D1%80%D0%B8%D0%B8\n# 
https://ru.stackoverflow.com/questions/697810/%D0%9E%D1%88%D0%B8%D0%B1%D0%BA%D0%B0-django-mptt\n# https://tretyakov.net/post/drevovidnye-kategorii-v-django/\n# https://github.com/abogushov/django-admin-json-editor\n\nfrom django.db import models\n\n\n# from django.db.models import JSONField\n\n# class Category(models.Model):\n# name = models.CharField(max_length=100)\n\n# def __str__(self):\n# return self.name\n\n# class Product(models.Model):\n# name = models.CharField(max_length=100)\n# category = models.ForeignKey(Category, on_delete=models.CASCADE)\n# price = models.IntegerField()\n# attributes = JSONField()\n\n# def __str__(self):\n# return self.name\n\n\nfrom django_jsonform.models.fields import JSONField\n\nclass Category(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\nclass Product(models.Model):\n name = models.CharField(max_length=100)\n category = models.ForeignKey(Category, on_delete=models.CASCADE)\n price = models.IntegerField()\n\n # Динамическая схема\n # https://django-jsonform.readthedocs.io/en/stable/guide/choices.html#dynamic-choices\n \n ITEMS_SCHEMA = {\n \"type\": \"object\",\n \"title\": \"Характеристики\",\n \"keys\": {\n \"brand\": {\n \"type\": \"string\",\n \"title\": \"Бренд\",\n },\n \"number\": {\n \"type\": \"integer\",\n \"title\": \"Кол-во на складе\",\n 'default': 0,\n },\n 'items': {\n 'type': 'string',\n 'choices': ['Eggs', 'Juice', 'Milk'],\n 'default': 'Milk'\n },\n\n },\n \"additionalProperties\": {\n \"type\": \"string\"\n }\n }\n attributes = JSONField(schema=ITEMS_SCHEMA)\n\n def __str__(self):\n return self.name","repo_name":"maxl85/JSONb","sub_path":"shop/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42685786575","text":"import json\n\nwith open('estoque.json', 'r') as arquivo:\n\tconteudo=arquivo.read()\n\testoque=json.loads(conteudo)\n\tvalor=0\n\n\tfor i in estoque[\"produtos\"]:\n\t\tvalor+=i[\"quantidade\"]*i[\"valor\"]\n\n\tprint(valor)","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_044/ch159_2020_05_03_20_53_04_698581.py","file_name":"ch159_2020_05_03_20_53_04_698581.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11352628827","text":"class Perceptron:\n def __init__(self, num_inputs=2, weights=[1,1]):\n self.num_inputs = num_inputs\n self.weights = weights\n \n def weighted_sum(self, inputs):\n weighted_sum = 0\n for i in range(self.num_inputs):\n weighted_sum += self.weights[i]*inputs[i]\n return weighted_sum\n \n def activation(self, weighted_sum):\n #Complete this method\n #Inside the .activation() method, return 1 if the weighted_sum is greater than or equal to 0.\n #Inside the .activation() method, return -1 if the weighted_sum is less than 0.\n if weighted_sum >= 0:\n return 1\n else:\n return -1 \n\ncool_perceptron = Perceptron()\nprint(cool_perceptron.weighted_sum([24, 55]))\n#Print out the result of the method .activation() called on cool_perceptron if the weighted sum is 
52.\nprint(cool_perceptron.activation(52))\n","repo_name":"Arif-Badhon/Code-Academy","sub_path":"DeepLearningTensorFlow/Neural_Network/Perceptron/Step2_ActivationFunction.py","file_name":"Step2_ActivationFunction.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15103317527","text":"import gradio as gr\nimport os\nimport torch\nfrom PIL import Image\n\n \n#subprocess.run([\"mv\",\"content/custom_data.yaml\",\"./yolov5/data\"]) \n\n\ndef load_model():\n '''\n Loading hub model & setting the preferences for the model \n '''\n model = torch.hub.load('ultralytics/yolov5', 'custom', path='Content/cnn.pt')\n model.conf = 0.38 \n model.dnn=True\n model.agnostic=True\n return model\n\nmodel=load_model()\n#, force_reload=True\ndef detect(inp):\n #g = (size / max(inp.size)) #gain\n #im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize \n results = model(inp,size=640) # inference\n results.render() # updates results.imgs with boxes and labels\n return Image.fromarray(results.imgs[0])\n \n\ninp = gr.inputs.Image(type=\"pil\", label=\"Original Image\")\noutput = gr.outputs.Image(type=\"pil\", label=\"Output Image\")\n\n\nio=gr.Interface(fn=detect, inputs=inp, outputs=output, title='CV Social Classification',theme='peach')\nio.launch(debug=True,share=False)\n \n#examples=['Content/4.jpg','Content/10.jpg','Content/18.jpg']\n\n \n ","repo_name":"sriramelango/CV-Social-Classification","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10130115285","text":"import pandas as pd\nimport logging\nimport os\nimport numpy as np\nfrom sklearn.externals import joblib\npd.options.mode.chained_assignment = None \n# np.random.seed(0)\n\ndef main():\n\n\n logging.basicConfig(level=logging.DEBUG)\n\n logging.info(\"Loading data....\\n\")\n repo_path = os.path.dirname(os.path.abspath(__file__))\n df_dir = (\"data\")\n df_dir_path = os.path.join(repo_path, df_dir)\n \n csv_list = []\n\n \n for file in os.listdir(df_dir):\n if file.endswith(\".csv\"):\n logging.debug(os.path.join(df_dir_path, file))\n csv_list.append(os.path.join(df_dir_path, file))\n\n\n df = pd.DataFrame()\n logging.info(\"Creating Dateframe from CSV files ...\\n\")\n for csv_file in csv_list:\n \n if csv_file[-5:]==\"1.csv\":\n temp_df = pd.read_csv(csv_file, sep='\\t',low_memory=False)\n \n else:\n temp_df = pd.read_csv(csv_file, sep=\",\",low_memory=False)\n \n logging.info(\"\\nCSV file:{}\\nSize = {}\\nShape ={}\\nShape[0] x Shape[1] = {}\".format(csv_file,temp_df.size, temp_df.shape, temp_df.shape[0]*temp_df.shape[1]))\n df = df.append(temp_df, ignore_index = True, sort=True)\n \n logging.info(\"\\n\\n-------Final dataframe-------:\\nSize = {}\\nShape ={}\\nShape[0] x Shape[1] = {}\".format(csv_file,df.size, df.shape, df.shape[0]*df.shape[1])) \n\n # df.to_csv(\"pf.csv\")\n \n logging.info(\"\\n\\nFactorize data ...\")\n cols = ['age_cat', 'edcution_cat', 'sex','years_in_residence','reg_cd', 'prod_id']\n df[cols] = df[cols].apply(lambda x: pd.factorize(x)[0] + 1)\n logging.info(\"\\nSubstitute prod_class any_class/prod-type-5-class -> 0/1 ....\")\n mask = (df['prod_class'] == \"prod-type-5-class\")\n neg_mask = ~(df['prod_class'] == \"prod-type-5-class\")\n df['prod_class'][mask] = 1\n df['prod_class'][neg_mask] = 0\n df = df.drop(['user_id'], axis=1)\n df = df.drop(['prod_id'], 
axis=1)\n # df.to_csv(\"af.csv\")\n\n X = df[[\"age_cat\",\"car_ownership\",\"credit_status_cd\",\"edcution_cat\",\"reg_cd\",\"revenue_usd\",\"sex\",\"years_in_residence\"]].copy()\n Y = df[[\"prod_class\"]].copy()\n\n logging.info(\"\\nDevide data to train/test with test size 0.2\")\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)\n \n logging.info(\"Size of train data X:{}, Y:{}\".format(X_train.shape, y_train.shape))\n logging.info(\"Size of test data X:{}, Y:{}\".format(X_test.shape, y_test.shape))\n \n from sklearn.linear_model import LogisticRegression\n from sklearn.preprocessing import MinMaxScaler\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import confusion_matrix\n \n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(X_train)\n # save scaler\n scaler_file = \"LR_scaler.sav\"\n joblib.dump(scaler, scaler_file)\n\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n \n y_train = y_train['prod_class'].values.tolist()\n y_test = y_test['prod_class'].values.tolist()\n\n \n TRAIN = False\n filename_LR = 'LR.sav'\n if TRAIN:\n \n print(\"Length of training data set {}\".format(len(X_train)))\n print(\"Start of training LR....\")\n clf = LogisticRegression(random_state=0, solver='liblinear', class_weight='balanced').fit(X_train, y_train)\n # save the model to disk\n\n \n joblib.dump(clf, filename_LR)\n else:\n logging.info(\"\\n\\nTraining is off, loading models...\\n\")\n clf = joblib.load(filename_LR)\n\n test_size = len(X_test)\n X_test = X_test[:test_size, :]\n y_test = y_test[:test_size]\n prediction = clf.predict(X_test)\n predict_proba = clf.predict_proba(X_test)\n score = clf.score(X_test, y_test)\n y_pred = clf.predict(X_test)\n tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()\n precision = tp/(tp+fp)\n recall = tp/(tp+fn)\n logging.info(\"LR Score:{}\".format(score))\n logging.info(\"LR precision:{}\".format(precision))\n logging.info(\"LR recall:{}\".format(recall))\n\n\n filename = 'MLP.sav' \n if TRAIN: \n print(\"Start of training MLP....\")\n \n from sklearn.neural_network import MLPClassifier\n model = MLPClassifier(verbose=True)\n model.fit(X_train, y_train)\n filename = 'MLP.sav'\n joblib.dump(model, filename)\n else:\n model = joblib.load(filename)\n \n\n MLP_acc_score = accuracy_score(y_test, model.predict(X_test))\n y_pred = model.predict(X_test)\n tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()\n precision = tp/(tp+fp)\n recall = tp/(tp+fn)\n logging.info(\"MLP Score:{}\".format(MLP_acc_score))\n logging.info(\"MLP precision:{}\".format(precision))\n logging.info(\"MLP recall:{}\".format(recall))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MrQubit/case_study","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1319934974","text":"import sys\nimport argparse\nimport logging\nimport csv\n\n# create logger\nlogger = logging.getLogger(\"channable-assignment\")\nlogger.setLevel(logging.INFO)\n\ndef parse_args(args):\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path_to_before_csv\", type=str)\n parser.add_argument(\"path_to_after_csv\", type=str)\n return parser.parse_args(args)\n except SystemExit as err:\n logger.error(f\"Can't find all arguments: Error code [{err}]\")\n raise SystemExit()\n\ndef open_file(path_to_open):\n try:\n return 
open(path_to_open, \"r\")\n except IOError as err:\n logger.error(f\"Can't find file or read data: {err}\")\n raise SystemExit()\n\n\"\"\"\nCreate: the product wasn’t imported from the eCommerce system yesterday (before), \nbut it was imported today (after). This means we have to send a create operation \nto the eCommerce platform\n\"\"\"\ndef get_create_operations_list(before = [], after = []):\n row_before = [row_before[\"id\"] for row_before in before]\n row_after = [\n dict(row_after)\n for row_after in after\n if row_after[\"id\"] not in row_before] \n return row_after\n\n\"\"\"\nDelete: the product was imported yesterday (before), but was not imported today (after). \nThis means we have to send a delete operation to the advertisement channel.\n\"\"\"\ndef get_delete_operations_list(before = [], after = []):\n row_after = [row_after[\"id\"] for row_after in after]\n\n # Get only ids comparison\n row_before = []\n for row in before:\n if row[\"id\"] not in row_after:\n d = {}\n d[\"id\"] = row[\"id\"]\n row_before.append(d)\n return row_before\n\n\"\"\"\nUpdate: the product was imported yesterday (before) and is also imported today (after), however, one\nof the values for the products has changed (e.g. price of the product). This means we\nhave to send an update operation to the advertisement channel\n\"\"\"\ndef get_update_operations_list(before = [], after = []):\n # Get list ids\n before_copy = []\n for row in before:\n before_copy.append(row)\n\n after_copy = []\n for row in after:\n after_copy.append(row) \n\n before_list = [row_before[\"id\"] for row_before in before_copy]\n after_list = [row_after[\"id\"] for row_after in after_copy]\n\n \"\"\" \n Get left and right lists that intersects by id \n \"\"\" \n # Before partitioning\n before_partitioning = [\n dict(row_before)\n for row_before in before_copy\n if row_before[\"id\"] in after_list]\n\n # After partitioning\n after_partitioning = [\n dict(row_after)\n for row_after in after_copy\n if row_after[\"id\"] in before_list]\n\n # Get difference (filds thas was updated) between lists\n row_after_partitioning = [row_after_partitioning for row_after_partitioning in after_partitioning\n if row_after_partitioning not in before_partitioning]\n \n # Return after product list that exists on before product list\n # and shows differences between fields\n return row_after_partitioning\n\n\n\nif __name__ == \"__main__\":\n\n # Get args\n args = parse_args(sys.argv[1:])\n\n # open before and after csv file\n before_csv = open_file(args.path_to_before_csv)\n after_csv = open_file(args.path_to_after_csv)\n\n # transform csv to dictionary\n before_dict = csv.DictReader(before_csv, delimiter=\",\")\n after_dict = csv.DictReader(after_csv, delimiter=\",\")\n\n # Get create operations list\n create_operations_list = get_create_operations_list(before_dict, after_dict)\n # TODO: Implement channel integration\n print(f\"Create operations (type: List of dictionaries):\\n{create_operations_list}\\n\")\n\n # Set file objects position to the beginning for the next data retrieval.\n before_csv.seek(0)\n after_csv.seek(0)\n\n # Get delete operations list\n delete_operations_list = get_delete_operations_list(before_dict, after_dict)\n # TODO: Implement channel integration\n print(f\"Delete operations (type: Set of ids):\\n{delete_operations_list}\\n\")\n\n # Set file objects position to the beginning for the next data retrieval.\n before_csv.seek(0)\n after_csv.seek(0)\n\n # Get update operations list\n update_operations_list = 
get_update_operations_list(before_dict, after_dict)\n # TODO: Implement channel integration\n print(f\"Update operations (type: List of dictionaries):\\n{update_operations_list}\\n\")\n\n before_csv.close()\n after_csv.close()\n","repo_name":"edwardmartinsjr/channable-assignment","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35665021070","text":"import boto3\nimport os\n\n# Replace 'your_access_key', 'your_secret_access_key', and 'your_region' with your actual AWS credentials\naws_access_key_id = os.environ.get('AWS_ACCESS_KEY')\naws_secret_access_key = os.environ.get('AWS_SECRET')\naws_region = 'us-east-2'\n\nbucket = 'opus-training-data'\nresource = \"s3\"\n\n\nclass BlobDelegate:\n def __init__(self):\n self.session = boto3.Session(\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n region_name=aws_region\n )\n\n\n def upload_blob(self, blob_id, file_path):\n with open(file_path, 'rb') as data:\n self.client.upload_fileobj(data, bucket, blob_id)\n\n def download_blob(self, _id, filename):\n self.session.resource(resource).Bucket(bucket).download_file(\n Key=_id, Filename=f\"/tmp/{filename}.wav\")\n","repo_name":"DinkBunk/Opus","sub_path":"blob_delegate.py","file_name":"blob_delegate.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35785645970","text":"from . import Base\nfrom sqlalchemy import Column, String, Integer, DateTime,Numeric\nimport datetime\n\nclass Match500Daxiao(Base):\n __tablename__ = 'match_500_daxiao'\n id = Column(Integer, primary_key=True)\n match_id = Column(Integer)\n comp = Column(String(45))\n handicap = Column(String(45))\n over = Column(Numeric)\n under = Column(Numeric)\n create_time = Column(DateTime, nullable=False)\n update_time = Column(DateTime, nullable=False)\n\n def __init__(self):\n self.create_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.update_time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")","repo_name":"Foreinyel/bodan","sub_path":"adagu_bodan/model/match_500_daxiao.py","file_name":"match_500_daxiao.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23717351190","text":"from Dock2D.Models.TrainerFI import *\nimport random\nfrom Dock2D.Utility.TorchDataLoader import get_interaction_stream, get_docking_stream\nfrom torch import optim\nfrom Dock2D.Utility.PlotterFI import PlotterFI\nfrom Dock2D.Models.model_interaction import Interaction\nfrom Dock2D.Models.model_sampling import SamplingModel\nfrom Dock2D.Utility.TorchDockingFFT import TorchDockingFFT\n\n\nif __name__ == '__main__':\n #################################################################################\n ##Datasets\n trainset = '../../Datasets/interaction_train_400pool.pkl'\n validset = '../../Datasets/interaction_valid_400pool.pkl'\n # validset = '../../Datasets/docking_valid_400pool.pkl'\n\n # ### testing set\n testset = '../../Datasets/interaction_test_400pool.pkl'\n #########################\n #### initialization of random seeds\n random_seed = 42\n randomstate = np.random.RandomState(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n random.seed(random_seed)\n torch.cuda.manual_seed(random_seed)\n # 
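[editor's note on the boto3 record above] `upload_blob` references `self.client`, but `__init__` only creates `self.session`, so uploads would raise AttributeError as written (downloads avoid it by going through `self.session.resource(...)`). A hedged fix sketch that builds the client once up front; the class and parameter names here are illustrative:

import boto3

class S3Blobs:
    def __init__(self, bucket, **session_kwargs):
        self.bucket = bucket
        self.session = boto3.Session(**session_kwargs)
        self.client = self.session.client('s3')  # the attribute missing above

    def upload_blob(self, blob_id, file_path):
        with open(file_path, 'rb') as data:
            self.client.upload_fileobj(data, self.bucket, blob_id)

    def download_blob(self, blob_id, dest_path):
        self.client.download_file(self.bucket, blob_id, dest_path)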
torch.backends.cudnn.deterministic = True\n torch.cuda.set_device(0)\n # torch.autograd.set_detect_anomaly(True)\n #########################\n ## number_of_pairs provides max_size of interactions: max_size = number_of_pairs*(number_of_pairs + 1)/2\n number_of_pairs = 100\n train_stream = get_interaction_stream(trainset, number_of_pairs=number_of_pairs)#, randomstate=randomstate)\n valid_stream = get_interaction_stream(validset, number_of_pairs=None)\n # valid_stream = get_docking_stream(validset, max_size=None, shuffle=False)\n test_stream = get_interaction_stream(testset, number_of_pairs=None)\n ######################\n # experiment = 'BF_FI_finaldataset_100pairs_1000ep'\n # experiment = 'BF_FI_finaldataset_100pairs_expC_BFIP_1000ex100ep'\n # experiment = 'BF_FI_finaldataset_100pairs_expC_BSIP_1000ex100ep'\n\n experiment = 'timer_BF_FI_finaldataset_100pairs_expC_BSIP_1000ex100ep'\n\n ##################### Load and freeze/unfreeze params (training, no eval)\n ### path to pretrained docking model\n # path_pretrain = 'Log/saved_models/IP_saved/BF_IP_finaldataset_1000pairs_100ep100.th'\n path_pretrain = 'Log/saved_models/IP_saved/BS_IP_finaldataset_1000pairs_100ep100.th'\n # training_case = 'A' # CaseA: train with docking model frozen\n # training_case = 'B' # CaseB: train with docking model unfrozen\n training_case = 'C' # CaseC: train with docking model SE2 CNN frozen and scoring (\"a\") coeffs unfrozen\n # training_case = 'scratch' # Case scratch: train everything from scratch\n experiment = training_case + '_' + experiment\n #####################\n train_epochs = 10\n lr_interaction = 10 ** -1\n lr_docking = 10 ** -4\n # sample_steps = 10\n sample_buffer_length = max(len(train_stream), len(valid_stream), len(test_stream))\n\n ########################\n model_name = \"BF IF\"\n debug = False\n plotting = False\n show = False\n ########################\n\n interaction_model = Interaction().to(device=0)\n interaction_optimizer = optim.Adam(interaction_model.parameters(), lr=lr_interaction)\n\n padded_dim = 100\n num_angles = 360\n dockingFFT = TorchDockingFFT(padded_dim=padded_dim, num_angles=num_angles, model_name=model_name)\n docking_model = SamplingModel(dockingFFT, FI_BF=True).to(device=0)\n docking_optimizer = optim.Adam(docking_model.parameters(), lr=lr_docking)\n Trainer = TrainerFI(docking_model, docking_optimizer, interaction_model, interaction_optimizer, experiment,\n training_case, path_pretrain,\n FI_MC=False,\n plotting=plotting,)\n ######################\n import timeit\n\n start = timeit.default_timer()\n ### Train model from beginning\n Trainer.run_trainer(train_epochs, train_stream=train_stream, valid_stream=None, test_stream=None)\n end = timeit.default_timer()\n print('Total time to load all 3 datasets:', end - start)\n\n ## Resume training model at chosen epoch\n # Trainer.run_trainer(resume_training=True, resume_epoch=845, train_epochs=155, train_stream=train_stream, valid_stream=None, test_stream=None)\n\n # Validate model at chosen epoch\n # Trainer.run_trainer(train_epochs=1, train_stream=None, valid_stream=valid_stream, test_stream=test_stream,\n # resume_training=True, resume_epoch=10)\n #\n ### Plot loss and free energy distributions with learned F_0 decision threshold\n # PlotterFI(experiment).plot_loss(show=True)\n # PlotterFI(experiment).plot_deltaF_distribution(plot_epoch=100, show=True, 
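[editor's note on the Dock2D training record above] the per-library seed block is boilerplate repeated across these scripts; a common consolidation is a single helper like the following (a sketch, not part of the repo):

import random
import numpy as np
import torch

def seed_everything(seed=42):
    # mirrors the inline seeding above, one call per RNG source
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True  # trades speed for determinism
    torch.backends.cudnn.benchmark = False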
xlim=1000)\n","repo_name":"lamoureux-lab/Dock2D","sub_path":"Models/BruteForce/train_bruteforce_FI.py","file_name":"train_bruteforce_FI.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5348045888","text":"import logging\nimport pymysql\n\n# logging config\nlogger = logging.getLogger()\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.INFO)\n\n# db config\nhost1 = \"av-q8-db.csaruqlxxway.us-east-1.rds.amazonaws.com\"\nport1 = 3306\ndb_name = \"av_q8_db\"\nuser1 = \"\"\npwd = \"\"\n\n# queries\nqry_create = \"CREATE TABLE Emp(eid int, ename varchar(30))\"\nqry_insert = \"INSERT INTO Emp(eid, ename) VALUES (1, 'user1'),(2, 'user2'),(3, 'user3'),(4, 'user4')\"\nqry_read = \"SELECT * FROM Emp\"\nqry_update = \"UPDATE Emp SET eid = 10, ename = 'user10' WHERE eid = 1\"\nqry_delete = \"DELETE FROM Emp WHERE eid = 2\"\n\n# connection object\nconnectn = pymysql.connect(host=host1, user=user1, password=pwd, db=db_name, port=port1)\n\ntry:\n # CREATE operation\n cursr = connectn.cursor()\n cursr.execute(qry_create)\n qry_show = \"show tables\"\n cursr.execute(qry_show)\n rows = cursr.fetchall()\n for row in rows:\n logger.info(row)\n \n # READ operation\n cursr.execute(qry_insert)\n cursr.execute(qry_read)\n rows = cursr.fetchall()\n for row in rows:\n logger.info(row)\n \n # UPDATE operation\n cursr.execute(qry_update)\n cursr.execute(qry_read)\n rows = cursr.fetchall()\n for row in rows:\n logger.info(row)\n \n # DELETE operation\n cursr.execute(qry_delete)\n cursr.execute(qry_read)\n rows = cursr.fetchall()\n for row in rows:\n logger.info(row)\n \n connectn.commit()\nexcept Exception as e:\n logger.info(e)\nfinally:\n connectn.close()\n","repo_name":"avinashsidhwani/aws-assessment","sub_path":"q8.py","file_name":"q8.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31004004186","text":"import numpy as np\nimport torch\n\nfrom gym.envs.classic_control.cartpole import CartPoleEnv\n\nfrom torch import nn\n\n\nclass DQN(nn.Module):\n def __init__(self, env: CartPoleEnv, optimizer_lr: float):\n super().__init__()\n\n in_features = int(np.prod(env.observation_space.shape))\n\n self.optimizer_lr = optimizer_lr\n self.net = nn.Sequential(\n nn.Linear(in_features=in_features, out_features=64),\n nn.Tanh(),\n nn.Linear(in_features=64, out_features=env.action_space.n)\n )\n\n self.optimizer = torch.optim.Adam(self.net.parameters(), self.optimizer_lr)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.net(x)\n\n def get_action(self, state: np.ndarray) -> int:\n state_t = torch.FloatTensor(np.expand_dims(state, axis=0))\n q_values = self(state_t)\n\n max_q_index = torch.argmax(q_values, dim=1)[0]\n action = max_q_index.detach().item()\n\n return action\n","repo_name":"stav95/ReinforcementLearningCartPole","sub_path":"dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31516338981","text":"import pandas as pd\r\nfrom pandas.io.json import json_normalize \r\nfrom datetime import datetime\r\nimport smartsheet\r\nimport pyodbc\r\n\r\n\r\ntok ='<>' # Your Smartsheet ODBC token\r\n \r\n# Initialize client\r\nsmart = smartsheet.Smartsheet(tok)\r\n\r\n#Function to get smartsheet as dataframe. Export sheet as csv and read as df. 
Faster than making the df cell by cell.\r\ndef get_sheet_as_df(smart , sheet_id):\r\n ss = smart.Sheets.get_sheet_as_csv(sheet_id , \".\") #dump csv into same directory as code. Specify own path in place of \".\" if needed.\r\n df = pd.read_csv(ss.filename) #read that csv as dataframe\r\n return df \r\n\r\n\r\n#drop n rows from ss. numrows is the argument for n\r\ndef drop_rows(numrows):\r\n shee = smart.Sheets.get_sheet(sheet_id)\r\n sheet_dict = shee.to_dict()\r\n rowlist = [i['id'] for i in sheet_dict['rows']] #list comprehension to get list of row ids of the smartsheet\r\n if (numrows > 100): #If a large no of rows to be deleted\r\n for i in range(100 , numrows , 100): #100 at a time\r\n smart.Sheets.delete_rows(sheet_id , rowlist[numrows-i:numrows-i+100]) #last 100 rows\r\n numrows = numrows-i #to catch the balance above a multiple of 100. eg if numrows was 118, 18 are still pending\r\n if(numrows == 0): #If numrows was a multiple of 100, then all have been processed\r\n return\r\n else:\r\n smart.Sheets.delete_rows(sheet_id , rowlist[-numrows:]) #delete the last n%100 rows using the Smartsheet sdk function.\r\n return\r\n\r\n#append dataframe into smartsheet. This checks first if the SS row limit is exceeded. If it is, then it drops the extra rows first before appending.\r\ndef write_into_ss(df): \r\n # Get all columns\r\n action = smart.Sheets.get_columns(sheet_id, include_all=True)\r\n columns = action.to_dict() #JSON to dictionary \r\n \r\n #Read the existing sheet as a df\r\n ssdf = get_sheet_as_df(smart , sheet_id) #df of the smartsheet\r\n idlist = list(ssdf['id']) #List of all the document id's in the smartsheet till now\r\n if (ssdf.shape[0] + df.shape[0] >=19990): #if ss+incoming df is close to 20,000 rows, then we will need to make place for new additions. 
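[editor's note on the Smartsheet record above] `drop_rows` mutates `numrows` inside its own loop to handle the remainder, which is fragile and returns early only when the count is an exact multiple of 100. A simpler chunked variant with the same intent, batching deletes at the 100-row API limit the original works around:

def drop_last_rows(smart, sheet_id, numrows, batch=100):
    if numrows <= 0:
        return
    sheet = smart.Sheets.get_sheet(sheet_id).to_dict()
    row_ids = [r['id'] for r in sheet['rows']][-numrows:]
    for i in range(0, len(row_ids), batch):   # fixed-size batches
        smart.Sheets.delete_rows(sheet_id, row_ids[i:i + batch])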
This is because SS has a limit of 20K rows.\r\n drop_rows(df.shape[0]) #drop ss rows equal to no of rows to be appended\r\n \r\n df = df.sort_values(by = ['']) #Sort on name of the column you need to maintain sorted.\r\n #Nested loop to read each cell of the dataframe and copy that into smartsheet cell by cell\r\n for i in range(df.shape[0]): \r\n if (df.iloc[i]['id'] in idlist): #If the current row's docid is already existing in ss, then skip that row\r\n continue\r\n row = smartsheet.models.Row() #initialize a new row variable for each row in the df\r\n row.to_top = True #Append new rows into the top of the Smartsheet\r\n for j in range(df.shape[1]): #Loop the columns for that row\r\n content = makecell( columns['data'][j]['id'] , df.iloc[i][j] ) #Pass the column id and dataframe cell value\r\n row.cells.append(content)\r\n smart.Sheets.add_rows(sheet_id,[row])\r\n \r\n return 0\r\n\r\n#Create entry for a single cell\r\ndef makecell(col_id , value):\r\n cell = smartsheet.models.Cell()\r\n cell.column_id = col_id\r\n cell.value = str(value) #For test purpose, make into string.\r\n return cell\r\n\r\n","repo_name":"bhawik21/Smartsheet-Helpers","sub_path":"Smartsheet Helper Functions.py","file_name":"Smartsheet Helper Functions.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2485334527","text":"\n\n\n# 不用字符串\n'''\ndef isPalindrome(x: int) -> bool:\n if x<0 :\n return False\n a = x\n result = 0\n c = 0 # 位数\n while(a!=0):\n c = a%10\n a = a//10\n result = result*10+c\n return True if result == x else False'''\n\n# 回文数可以只判断一半位数\ndef isPalindrome(x: int) -> bool:\n if x<0 or (x%10 == 0 and x!=0):\n return False\n result = 0\n while(x>result): # 将x的位给result,当result>x表示已经过了一半\n result = result*10 + x%10\n x = x//10\n # x可能长度为奇数,也可能为偶数\n # 长度偶数时,直接判断result和x,奇数时,需要除掉result的最后一位\n return x == result or x == result//10 \n\nprint(isPalindrome(1001))","repo_name":"August1s/LeetCode","sub_path":"Math/No9回文数.py","file_name":"No9回文数.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12678021596","text":"import requests\nfrom contextlib import closing\n\"\"\"\n 下载文件并显示进度条\n\"\"\"\n\n\nclass ProgressBar(object):\n \"\"\" 格式化的进度条显示模块 \"\"\"\n\n def __init__(self, title, count=0.0, run_status=None, fin_status=None, \n total=100.0, unit='', sep='/', chunk_size=1.0):\n super(ProgressBar, self).__init__()\n self.info = \"【%s】%s %.2f %s %s %.2f %s\"\n self.title = title\n self.total = total\n self.count = count\n self.chunk_size = chunk_size\n self.status = run_status or \"\"\n self.fin_status = fin_status or \" \" * len(self.status)\n self.unit = unit\n self.seq = sep\n\n def __get_info(self):\n # 【名称】状态 进度 单位 分割线 总数 单位\n _info = self.info % (self.title, self.status,\n self.count/self.chunk_size, self.unit, self.seq, \n self.total/self.chunk_size, self.unit)\n return _info\n\n def refresh(self, count=1, status=None):\n self.count += count\n # if status is not None:\n self.status = status or self.status\n end_str = \"\\r\"\n if self.count >= self.total:\n end_str = '\\n'\n self.status = status or self.fin_status\n print(self.__get_info(), end=end_str)\n\n\ndef txtToList(file):\n \"\"\"获取txt文件中的下载地址\"\"\"\n with open(file) as f:\n result = f.read().splitlines()\n return result\n\n\ndef strToList(str):\n \"\"\"string 按照 制表符 组装成list\"\"\"\n result = str.split(\"\\t\")\n return result\n\n\ndef 
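[editor's note on the palindrome record above] the comments are in Chinese; the idea is to reverse only the lower half of the digits and stop once the reversed part catches up, then compare `x == result` for even digit counts and `x == result // 10` for odd ones. A worked trace plus spot checks, assuming `isPalindrome` from that file is in scope:

# Trace for x = 1001 (loop runs while x > result):
#   x=1001, result=0  ->  x=100, result=1
#   x=100,  result=1  ->  x=10,  result=10
#   x=10 <= result=10 stops; even length, so x == result -> True
for x, expected in [(0, True), (7, True), (10, False),
                    (121, True), (1001, True), (-121, False)]:
    assert isPalindrome(x) == expected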
downForUrl(name, url):\n \"\"\"url实现下载并保存\"\"\"\n header = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/\" +\n \"537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"}\n with closing(requests.get(url, headers=header, stream=True)) as response:\n chunk_file_size = 1024 # 单次请求最大值\n content_size = int(response.headers['content-length']) # 内容体总大小\n progress = ProgressBar(name, total=content_size, unit=\"KB\",\n chunk_size=chunk_file_size,\n run_status=\"downing...\",\n fin_status=\"Download Over\")\n\n with open(name, 'wb') as f:\n for data in response.iter_content(chunk_size=chunk_file_size):\n f.write(data)\n progress.refresh(count=len(data))\n\n\nif __name__ == \"__main__\":\n file = '1.txt'\n list_result = txtToList(file)\n for line in list_result:\n result = strToList(line)\n name = result[0]+\"-\"+result[1]+\".apk\"\n downForUrl(name, result[3])\n","repo_name":"atworkhi/WorkTools","sub_path":"downloading/down.py","file_name":"down.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33422935664","text":"from __future__ import print_function\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport math\r\n#from tkinter import *\r\nimport start_menu\r\nimport threading\r\nfrom menu import *\r\nfrom smarts import smarts\r\nfrom smarter import smarter\r\nfrom gsmarts import *\r\nimport variables\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom matplotlib.animation import FuncAnimation\r\nfrom feedback import report\r\n\r\n# inicialization of global variables\r\nvariables.init()\r\n\r\n# Threads definition\r\ndef thread_swing():\r\n variables.elicitation_done = swing()\r\n\r\n\r\ndef thread_feedback():\r\n report()\r\n\r\n#k_smarts = [100,90,40,20,30,60]\r\n#s = [4,200,5,0,1,2]\r\n\r\n# type of criteria (0 = continuous; 1 = discrete; 2 = binary)\r\nc_type = [0,0,0,2,1,1]\r\n\r\n#start elicitation-swing \r\nTswing = threading.Thread(target=thread_swing)\r\nTswing.start()\r\n\r\n# swing bar graph visualization\r\nwhile(variables.elicitation_done == False):\r\n Criteria = ['Distance','Length','Altitude','Wind','Urban density', 'Support']\r\n \r\n\r\n def animate(i):\r\n\r\n plt.cla()\r\n\r\n plt.bar(Criteria, variables.elicit)\r\n plt.title('Choose the criterion that you want to maximize in order of preference:')\r\n plt.xlabel('Criterion')\r\n plt.ylabel('Score')\r\n plt.tight_layout()\r\n\r\n\r\n ani = FuncAnimation(plt.gcf(), animate, interval=100)\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n\r\ninit_weights()\r\ninit_st()\r\n\r\n\r\nchoice() #calls for thread choose method in menu.py\r\n\r\n# start thread of feedback data \r\nfb = threading.Thread(target=thread_feedback)\r\nfb.start()\r\n\r\n# map and airplane image load\r\nsrcMap = cv.imread(\"map.png\")\r\nsrcPlane = cv.imread(\"airplane3.png\")\r\n\r\n# mapped airports name\r\nairports_name = [\"LaGuardia Airport\",\"Teterboro Airport\",\"JFK Airport\",\"Republic Airport\",\"MacArthur Airport\",\r\n\"Westchester Country Airport\",\"Hill Top Airport\",\"Lincoln Park Airport\",\"Essex Country Airport\",\"Morristown Airport\",\r\n\"Linden Airoprt\",\"Newark Airport\",\"Central Jersey Airport\"]\r\n\r\n# index(Distance, Runway Length, Altitude, Wind direction, Building density level, Support level)\r\nairport_matrix = [\r\n [12.41, 2134, 20, 2,1,2,0],\r\n [15.42,1833,8.4,1,3,5,1],\r\n [31.27,3048,17.4,1,2,3,2],\r\n [40.22,2083,82,2,3,4,3],\r\n [65.72,2135,99,2,3,7,4],\r\n [29.44,1996,439,1,2,7,5],\r\n 
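[editor's note on the download record above] `requests.Response` has been a context manager since requests 2.18, so the `contextlib.closing` wrapper is optional on modern versions. A compact variant of the same streamed-download-with-progress idea; the chunk size and timeout are illustrative:

import requests

def download(url, dest, chunk=1024):
    with requests.get(url, stream=True, timeout=30) as r:
        r.raise_for_status()
        total = int(r.headers.get('content-length', 0))
        done = 0
        with open(dest, 'wb') as f:
            for data in r.iter_content(chunk_size=chunk):
                f.write(data)            # stream to disk chunk by chunk
                done += len(data)
                if total:
                    print(f'\r{done}/{total} bytes', end='')
    print()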
[48.17,655,921,1,1,7,6],\r\n [38.55,3932,1219,1,2,5,7],\r\n [33.89,1387,172,2,2,4,8],\r\n [45.58,1828,187,1,2,5,9],\r\n [39.10,1260,23,1,3,3,10],\r\n [28.71,3048,17.4,1,2,1,11],\r\n [69.27,1070,86,1,2,4,12]\r\n]\r\n\r\nairport_matrix_ori = [\r\n [12.41, 2134, 20, 2,1,2,0],\r\n [15.42,1833,8.4,1,3,5,1],\r\n [31.27,3048,17.4,1,2,3,2],\r\n [40.22,2083,82,2,3,4,3],\r\n [65.72,2135,99,2,3,7,4],\r\n [29.44,1996,439,1,2,7,5],\r\n [48.17,655,921,1,1,7,6],\r\n [38.55,3932,1219,1,2,5,7],\r\n [33.89,1387,172,2,2,4,8],\r\n [45.58,1828,187,1,2,5,9],\r\n [39.10,1260,23,1,3,3,10],\r\n [28.71,3048,17.4,1,2,1,11],\r\n [69.27,1070,86,1,2,4,12]\r\n]\r\n#coordinates of airports in map\r\nairport_coord = [\r\n [638,392,0],\r\n [472,296,1],\r\n [710,539,2],\r\n [1031,447,3],\r\n [1304,364,4],\r\n [779,54,5],\r\n [234,39,6],\r\n [253,189,7],\r\n [292,277,8],\r\n [166,364,9],\r\n [318,573,10],\r\n [370,490,11],\r\n [15,674,12]\r\n]\r\n#Constante de aproximacao pixels - KM\r\nc_aprox = 10.26 # valor obtido pela média das taxas de distancia pixel/km das 3 coordenadas abaixo\r\n#(779-15)²+(674-54)²=(D)² = 983 -> taxa de 10,25 pixels/km\r\n#(1304-15)²+(674-364)²=(D2)²= 1.325,75 -> taxa de 10,24 pixels/km\r\n#(1304-779)²+(364-54)²=(D3)² = 609,69 -> taxa de 10,28 pixels/km\r\n# Central jersey(15,674) - westchester(779,54) = 95.88\r\n# Central jersey - macarthur = 129.45\r\n# westchester - macarthur(1304,364) = 59.30\r\nglide_ratio = 17\r\n\r\n\r\n#INICIALIZAÇÃO COORDENADAS MAPA\r\n\r\n# INICIALIZAÇÃO VARIAVEIS MENU DE VOO\r\nmax_value_A = 12000 # M\r\nmax_value_H = 360//2\r\nmax_value_R = 360\r\nmax_value_X = srcMap.shape[1]\r\nmax_value_Y = srcMap.shape[0]\r\nlow_H = 0\r\nlow_A = 0\r\nlow_X = 0\r\nlow_Y = 0\r\nlow_R = 0\r\nhigh_A = max_value_A\r\nhigh_X = max_value_X\r\nhigh_Y = max_value_Y\r\nhigh_H = max_value_H\r\nhigh_R = max_value_R\r\nwindow_capture_name = 'Map'\r\nwindow_detection_name = 'Flight Status'\r\nlow_H_name = 'Low H'\r\nhigh_H_name = 'High H'\r\naltitude_name = \"altitude\"\r\nxpos_name = \"x coord\"\r\nypos_name = \"y coord\"\r\nrotation_name = \"rotation\"\r\n\r\n#select = objectives()\r\n#print(select)\r\n\r\n\r\ndef on_low_H_thresh_trackbar(val):\r\n global low_H\r\n global high_H\r\n low_H = val\r\n low_H = min(high_H-1, low_H)\r\n cv.setTrackbarPos(low_H_name, window_detection_name, low_H)\r\ndef on_high_H_thresh_trackbar(val):\r\n global low_H\r\n global high_H\r\n high_H = val\r\n high_H = max(high_H, low_H+1)\r\n cv.setTrackbarPos(high_H_name, window_detection_name, high_H)\r\ndef on_altitude_trackbar(val):\r\n global low_A\r\n global high_A \r\n high_A = val\r\n high_A = max(high_A, low_A+1)\r\n cv.setTrackbarPos(altitude_name, window_detection_name, high_A)\r\ndef on_Xpos_trackbar(val):\r\n global low_X\r\n global high_X \r\n high_X = val\r\n high_X = max(high_X, low_X+1)\r\n cv.setTrackbarPos(xpos_name, window_detection_name, high_X)\r\ndef on_Ypos_trackbar(val):\r\n global low_Y\r\n global high_Y \r\n high_Y = val\r\n high_Y = max(high_Y, low_Y+1)\r\n cv.setTrackbarPos(ypos_name, window_detection_name, high_Y)\r\ndef on_rotation_trackbar(val):\r\n global low_R\r\n global high_R\r\n high_R = val\r\n high_R = max(high_R, low_R+1)\r\n cv.setTrackbarPos(rotation_name, window_detection_name, high_R)\r\n\r\n#Trackbars of flight controls\r\ncv.namedWindow(window_capture_name)\r\ncv.namedWindow(window_detection_name)\r\ncv.createTrackbar(altitude_name, window_detection_name , high_A, max_value_A, on_altitude_trackbar)\r\ncv.createTrackbar(xpos_name, window_detection_name , high_X, max_value_X, 
on_Xpos_trackbar)\r\ncv.createTrackbar(ypos_name, window_detection_name , high_Y, max_value_Y, on_Ypos_trackbar)\r\ncv.createTrackbar(rotation_name, window_detection_name , high_R, max_value_R, on_rotation_trackbar)\r\ncv.resizeWindow(window_detection_name,width=400,height=2)\r\nsrcPlane = cv.resize(srcPlane,(int(srcPlane.shape[1]), int(srcPlane.shape[1])), interpolation=cv.INTER_AREA)\r\n\r\n# Rotation plane function\r\ndef combine_img(image1, image2, anchor_y, anchor_x):\r\n foreground, background = image1.copy(), image2.copy()\r\n able = True\r\n # Check if the foreground is inbound with the new coordinates and raise an error if out of bounds\r\n background_height = background.shape[0]\r\n background_width = background.shape[1]\r\n foreground_height = foreground.shape[0]\r\n foreground_width = foreground.shape[1]\r\n if foreground_height+anchor_y > background_height or foreground_width+anchor_x > background_width:\r\n able = False\r\n \r\n alpha =0.8\r\n\r\n # do composite at specified location\r\n start_y = anchor_y\r\n start_x = anchor_x\r\n end_y = anchor_y+foreground_height\r\n end_x = anchor_x+foreground_width\r\n if(able):\r\n blended_portion = cv.addWeighted(foreground, alpha, background[start_y:end_y, start_x:end_x,:], 1 - alpha,0)\r\n background[start_y:end_y, start_x:end_x,:] = blended_portion\r\n return background\r\ndef rotate_image(mat, angle):\r\n \"\"\"\r\n Rotates an image (angle in degrees) and expands image to avoid cropping\r\n \"\"\"\r\n\r\n height, width = mat.shape[:2] # image shape has 3 dimensions\r\n image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape\r\n\r\n rotation_mat = cv.getRotationMatrix2D(image_center, angle, 1.)\r\n\r\n # rotation calculates the cos and sin, taking absolutes of those.\r\n abs_cos = abs(rotation_mat[0,0]) \r\n abs_sin = abs(rotation_mat[0,1])\r\n\r\n # find the new width and height bounds\r\n bound_w = int(height * abs_sin + width * abs_cos)\r\n bound_h = int(height * abs_cos + width * abs_sin)\r\n\r\n # subtract old image center (bringing image back to origo) and adding the new image center coordinates\r\n rotation_mat[0, 2] += bound_w/2 - image_center[0]\r\n rotation_mat[1, 2] += bound_h/2 - image_center[1]\r\n\r\n # rotate image with the new bounds and translated rotation matrix\r\n rotated_mat = cv.warpAffine(mat, rotation_mat, (bound_w, bound_h))\r\n return rotated_mat\r\n\r\n# Glide radius of the plane\r\ndef get_glide_radius(z_coord):\r\n radius = int((z_coord/1000)*glide_ratio*c_aprox)\r\n return radius\r\n\r\n#update distance from airplane to airport in the airport matrix\r\ndef update_pos(x_coord,y_coord):\r\n iterator = 0\r\n for a in airport_coord:\r\n pixel_dist = pow(a[0]-x_coord,2)+pow(a[1]-y_coord,2)\r\n pixel_dist = math.sqrt(pixel_dist)\r\n airport_matrix[iterator][0] = pixel_dist/10.25\r\n airport_matrix[iterator][1] = airport_matrix_ori[iterator][1]\r\n airport_matrix[iterator][2] = airport_matrix_ori[iterator][2]\r\n airport_matrix[iterator][3] = airport_matrix_ori[iterator][3]\r\n airport_matrix[iterator][4] = airport_matrix_ori[iterator][4]\r\n airport_matrix[iterator][5] = airport_matrix_ori[iterator][5]\r\n iterator = iterator+1\r\n\r\n\r\n\r\n\r\nplanesize = np.float32([[0,0],[srcPlane.shape[0],0],[0,srcPlane.shape[1]],[srcPlane.shape[0],srcPlane.shape[1]]])\r\n\r\n\r\ndef matrix_smarts(matrix_con):\r\n if matrix_con is None:\r\n return 0\r\n else:\r\n max_vc = []\r\n min_vc = []\r\n for alt in range(len(matrix_con)):\r\n for cri in 
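[editor's note on the FlightAssist record above] `get_glide_radius` converts altitude in metres to map pixels: metres to kilometres, times the glide ratio for ground range, times the pixel/km map scale. Note that `update_pos` divides by a hard-coded 10.25 instead of reusing `c_aprox` (10.26), a small inconsistency. A worked check of the radius arithmetic with the record's own constants:

altitude_m = 12000           # trackbar maximum
glide_ratio = 17             # km travelled per km of altitude lost
px_per_km = 10.26            # c_aprox, the averaged map scale
radius_px = int((altitude_m / 1000) * glide_ratio * px_per_km)
assert radius_px == 2093     # 12 km * 17 = 204 km range ~ 2093 px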
range(len(matrix_con[0])-1):\r\n if(alt == 0):\r\n max_vc.append(matrix_con[alt][cri])\r\n min_vc.append(matrix_con[alt][cri])\r\n else:\r\n if matrix_con[alt][cri] > max_vc[cri]:\r\n max_vc[cri] = matrix_con[alt][cri]\r\n if matrix_con[alt][cri] < min_vc[cri]:\r\n min_vc[cri] = matrix_con[alt][cri]\r\n for alt in range(len(matrix_con)):\r\n for cri in range(len(matrix_con[0])-1):\r\n if(max_vc[cri]-min_vc[cri] != 0):\r\n if(cri == 1):\r\n matrix_con[alt][cri] = (matrix_con[alt][cri] - min_vc[cri])/(max_vc[cri]-min_vc[cri])\r\n else: \r\n matrix_con[alt][cri] = (matrix_con[alt][cri] - max_vc[cri])/(min_vc[cri]-max_vc[cri])\r\n else:\r\n matrix_con[alt][cri] = 0\r\n return matrix_con\r\n\r\nwhile True:\r\n matrix_selected = [] # available alternatives\r\n map = srcMap.copy()\r\n srcPlane_R = rotate_image(srcPlane,high_R)\r\n cv.circle(map, (high_X,high_Y), get_glide_radius(high_A), (0,0,255), thickness=5)\r\n update_pos(high_X,high_Y)\r\n for a in airport_coord:\r\n if(pow(high_X-a[0],2)+pow(high_Y-a[1],2) <= pow(get_glide_radius(high_A),2)):\r\n map = cv.circle(map, (a[0],a[1]), radius=1, color=(0, 255, 0), thickness=10)\r\n matrix_selected.append(airport_matrix[a[2]])\r\n else: \r\n map = cv.circle(map, (a[0],a[1]), radius=1, color=(0, 255, 255), thickness=10)\r\n planecoord = np.float32([[high_X-20,high_Y-20],[high_X+20,high_Y-20],[high_X-20,high_Y+20],[high_X+20,high_Y+20]])\r\n homography1, status1 = cv.findHomography(planesize,planecoord)\r\n warpBoard1 = cv.warpPerspective(srcPlane_R, homography1, (srcMap.shape[1], srcMap.shape[0]), borderMode=cv.BORDER_CONSTANT, borderValue=(0,0,0))\r\n merged_image = np.where(warpBoard1==0, map, warpBoard1)\r\n if(len(matrix_selected)>1):\r\n correct_k = intra_analysis(matrix_selected,variables.weights_smarts,variables.st_smartest,c_type)\r\n\r\n normalized_matrix = matrix_smarts(matrix_selected)\r\n if(variables.activate[2]==1):\r\n get_smartest = gsmarts(normalized_matrix,variables.weights_smarts,correct_k)\r\n if not get_smartest:\r\n print(\"no choice SMARTEST\")\r\n variables.found_smartest = False\r\n else:\r\n variables.found_smartest = True\r\n best_coord = (airport_coord[get_smartest[0][0]][0],airport_coord[get_smartest[0][0]][1])\r\n variables.best_smartest = airports_name[get_smartest[0][0]]\r\n variables.score_smartest = get_smartest[0][1]\r\n cv.line(merged_image, (high_X,high_Y), best_coord, color=(0, 255, 0), thickness=10)\r\n if(variables.activate[0]==1):\r\n get_order = smarts(normalized_matrix,variables.weights_smarts)\r\n if not get_order:\r\n print(\"no choice SMARTS\")\r\n variables.found_smarts = False\r\n else:\r\n variables.found_smarts = True\r\n best_coord = (airport_coord[get_order[0][0]][0],airport_coord[get_order[0][0]][1])\r\n variables.best_smarts = airports_name[get_order[0][0]]\r\n variables.score_smarts = get_order[0][1]\r\n cv.line(merged_image, (high_X,high_Y), best_coord, color=(255, 0, 0), thickness=7) \r\n if(variables.activate[1]==1):\r\n get_smarter = smarter(normalized_matrix,variables.rank)\r\n if not get_smarter:\r\n print(\"no choice SMARTER\")\r\n variables.found_smarter = False\r\n else:\r\n variables.found_smarter = True\r\n best_coord = (airport_coord[get_smarter[0][0]][0],airport_coord[get_smarter[0][0]][1])\r\n variables.best_smarter = airports_name[get_smarter[0][0]]\r\n variables.score_smarter = get_smarter[0][1]\r\n cv.line(merged_image, (high_X,high_Y), best_coord, color=(0, 255, 255), thickness=3) \r\n \r\n cv.imshow(window_capture_name, combine_img(srcPlane_R,map,high_Y,high_X))\r\n 
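[editor's note on `matrix_smarts` above] the function min-max scales each criterion column, treating column 1 (runway length) as benefit-type and the rest as cost-type, leaves constant columns at 0, and never touches the trailing airport index. A vectorised numpy sketch of the same transform; the function and argument names are illustrative, and 1 - (v - lo)/span equals the record's (v - max)/(min - max) for cost columns:

import numpy as np

def normalise(matrix, benefit_cols=(1,)):
    m = np.asarray(matrix, dtype=float)
    vals, idx = m[:, :-1], m[:, -1]           # last column = airport index
    lo, hi = vals.min(axis=0), vals.max(axis=0)
    span = hi - lo
    out = np.zeros_like(vals)                 # constant columns stay 0
    nz = span != 0
    out[:, nz] = (vals[:, nz] - lo[nz]) / span[nz]
    for c in np.flatnonzero(nz):
        if c not in benefit_cols:             # cost criteria: invert scale
            out[:, c] = 1.0 - out[:, c]
    return out, idx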
cv.imshow(window_capture_name, merged_image)\r\n\r\n key = cv.waitKey(30)\r\n if key == ord('q') or key == 27:\r\n break","repo_name":"GustavoCamargoRL/FlightAssist","sub_path":"pilot.py","file_name":"pilot.py","file_ext":"py","file_size_in_byte":13629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3114119164","text":"'''\nCreated on Jul. 3, 2021\n\n@author: zollen\n'''\n\nimport pandas as pd\nimport numpy as np\nimport time\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\npd.set_option('max_columns', None)\npd.set_option('max_rows', None)\npd.set_option('display.width', 1000)\n\ntest = pd.read_csv('../data/test.csv')\npreds = pd.read_csv('../data/prediction.csv')\ntrain = pd.read_csv('../data/monthly_train.csv')\n\n\n'''\nshop_id, item_id, date_block_num, lg2, lg1, item_cnt_month\n1 2 0 1 \n1 2 1 2\n1 2 2 1 2 1 \n1 2 3 2 1 1\n1 2 4 1 1 2 \n1 2 5 1 2 2\n1 2 6 2 2 1\n\nhttps://stackoverflow.com/questions/20410312/how-to-create-a-lagged-data-structure-using-pandas-dataframe\nhttps://rayheberer.medium.com/generating-lagged-pandas-columns-10397309ccaf \n'''\ndef lag_features(df, trailing_window_size, columns, targets, no_na=True):\n \n df_lagged = df.copy()\n \n for window in range(1, trailing_window_size + 1):\n shifted = df[columns + targets ].groupby(columns).shift(window)\n shifted.columns = [x + \"_lag\" + str(window) for x in df[targets]]\n df_lagged = pd.concat((df_lagged, shifted), axis=1)\n \n if no_na:\n df_lagged.dropna(inplace=True)\n \n return df_lagged\n \n'''\nkeys = ['shop_id', 'item_id']\n\ntstart = time.time()\nt = lag_features(train, 3, keys, ['item_cnt_month'])\ntend = time.time()\nprint(t.head(500))\nprint(\"TIME: \", tend - tstart)\n'''\n\nk = pd.DataFrame({\n 'A': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2], \n 'B': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n 'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n })\nt = lag_features(k, 3, ['A'], ['B'], False)\nprint(t.dropna())\nt = lag_features(t, 2, ['A'], ['C'])\nt.drop(columns=['C_lag1'], inplace=True)\nprint(t)\n\ng = lag_features(k, 3, ['A'], ['B', 'C'])\nprint(g.drop(columns=['C_lag1', 'C_lag3']))\n","repo_name":"zollen/Python-ML","sub_path":"futuresales_kaggle/analysis/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32685114396","text":"########################################################################\n# File name: test_types.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. 
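[editor's note on the lag-features record above] the core trick is a grouped shift: within each key group, `shift(N)` pulls the target value from N rows earlier, so downstream models see the series' own history as extra columns. A toy illustration of a single lag:

import pandas as pd

df = pd.DataFrame({'A': [1, 1, 1, 2, 2], 'B': [10, 20, 30, 40, 50]})
df['B_lag1'] = df.groupby('A')['B'].shift(1)   # lag within each group
print(df)
#    A   B  B_lag1
# 0  1  10     NaN
# 1  1  20    10.0
# 2  1  30    20.0
# 3  2  40     NaN   <- the lag never crosses group boundaries
# 4  2  50    40.0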
If not, see\n# .\n#\n########################################################################\nimport abc\nimport contextlib\nimport decimal\nimport fractions\nimport inspect\nimport ipaddress\nimport itertools\nimport unittest\nimport unittest.mock\nimport warnings\n\nfrom enum import Enum, IntEnum\n\nimport pytz\n\nfrom datetime import datetime, date, time\n\nimport aioxmpp.xso as xso\nimport aioxmpp.structs as structs\n\n\nclass Unknown(unittest.TestCase):\n def test_init(self):\n u = xso.Unknown(unittest.mock.sentinel.value)\n self.assertEqual(\n u.value,\n unittest.mock.sentinel.value,\n )\n\n def test_init_default(self):\n with self.assertRaises(TypeError):\n xso.Unknown()\n\n def test_value_not_settable(self):\n u = xso.Unknown(unittest.mock.sentinel.value)\n with self.assertRaises(AttributeError):\n u.value = \"foobar\"\n\n def test_hash_equal_to_value_hash(self):\n values = [\n None,\n \"foobar\",\n object(),\n 10,\n 10.2,\n ]\n\n for value in values:\n u = xso.Unknown(value)\n self.assertEqual(hash(u), hash(value))\n\n def test_equality(self):\n values = [\n None,\n \"foobar\",\n object(),\n 10,\n 10.2,\n ]\n\n for v1, v2 in itertools.product(values, values):\n if v1 == v2:\n self.assertTrue(xso.Unknown(v1) == xso.Unknown(v2))\n self.assertFalse(xso.Unknown(v1) != xso.Unknown(v2))\n else:\n self.assertFalse(xso.Unknown(v1) == xso.Unknown(v2))\n self.assertTrue(xso.Unknown(v1) != xso.Unknown(v2))\n self.assertFalse(v1 == xso.Unknown(v2))\n self.assertFalse(xso.Unknown(v2) == v1)\n self.assertTrue(v1 != xso.Unknown(v2))\n self.assertTrue(xso.Unknown(v2) != v1)\n\n def test_repr(self):\n values = [\n None,\n \"foobar\",\n object(),\n 10,\n 10.2,\n ]\n\n for v in values:\n self.assertEqual(\n repr(xso.Unknown(v)),\n \"\".format(v),\n )\n\n\nclass TestAbstractCDataType(unittest.TestCase):\n class DummyType(xso.AbstractCDataType):\n def parse(self, v):\n pass\n\n def test_is_abstract(self):\n self.assertIsInstance(\n xso.AbstractCDataType,\n abc.ABCMeta)\n with self.assertRaises(TypeError):\n xso.AbstractCDataType()\n\n def test_parse_method(self):\n self.assertTrue(inspect.isfunction(xso.AbstractCDataType.parse))\n\n def test_format_method(self):\n self.assertTrue(inspect.isfunction(xso.AbstractCDataType.format))\n self.assertEqual(\n \"foo\",\n self.DummyType().format(\"foo\"))\n self.assertEqual(\n \"23\",\n self.DummyType().format(23))\n\n\nclass TestAbstractElementType(unittest.TestCase):\n class DummyType(xso.AbstractElementType):\n def unpack(self, obj):\n pass\n\n def pack(self, v):\n pass\n\n def get_xso_types(self):\n pass\n\n def test_is_abstract(self):\n self.assertIsInstance(\n xso.AbstractElementType,\n abc.ABCMeta)\n with self.assertRaises(TypeError):\n xso.AbstractElementType()\n\n def test_unpack_method(self):\n self.assertTrue(inspect.isfunction(xso.AbstractElementType.unpack))\n\n def test_coerce_method(self):\n self.assertTrue(inspect.isfunction(xso.AbstractElementType.coerce))\n self.assertEqual(\n self.DummyType().coerce(unittest.mock.sentinel.value),\n unittest.mock.sentinel.value\n )\n\n def test_get_xso_types(self):\n self.assertTrue(inspect.isfunction(\n xso.AbstractElementType.get_xso_types\n ))\n\n def test_pack_method(self):\n self.assertTrue(inspect.isfunction(xso.AbstractElementType.pack))\n\n\nclass TestStringType(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.String(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.String()\n self.assertEqual(\n \"foo\",\n t.parse(\"foo\"))\n\n def test_format(self):\n t 
= xso.String()\n self.assertEqual(\n \"foo\",\n t.format(\"foo\"))\n\n def test_coerce_passes_string(self):\n t = xso.String()\n s = \"foobar\"\n self.assertIs(s, t.coerce(s))\n\n def test_coerce_rejects_non_strings(self):\n t = xso.String()\n\n values = [\n 1.2,\n decimal.Decimal(\"1\"),\n fractions.Fraction(1, 1),\n [],\n (),\n 1.\n ]\n\n for value in values:\n with self.assertRaisesRegex(TypeError, \"must be a str\"):\n t.coerce(value)\n\n def test_coerce_stringprep(self):\n prepfunc = unittest.mock.Mock()\n t = xso.String(prepfunc=prepfunc)\n\n result = t.coerce(\"foobar\")\n\n self.assertSequenceEqual(\n [\n unittest.mock.call(\"foobar\"),\n ],\n prepfunc.mock_calls\n )\n\n self.assertEqual(\n prepfunc(),\n result,\n )\n\n def test_parse_stringprep(self):\n prepfunc = unittest.mock.Mock()\n t = xso.String(prepfunc=prepfunc)\n\n result = t.parse(\"foobar\")\n\n self.assertSequenceEqual(\n [\n unittest.mock.call(\"foobar\"),\n ],\n prepfunc.mock_calls\n )\n\n self.assertEqual(\n prepfunc(),\n result,\n )\n\n\nclass TestIntegerType(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.Integer(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.Integer()\n self.assertEqual(\n 123,\n t.parse(\"123\"))\n\n def test_parse_failure(self):\n t = xso.Integer()\n with self.assertRaises(ValueError):\n t.parse(\"123f\")\n\n def test_format(self):\n t = xso.Integer()\n self.assertEqual(\n \"123\",\n t.format(123))\n\n def test_coerce_passes_integral_numbers(self):\n t = xso.Integer()\n\n values = [-2, 0, 1, 2, 3, 4, 100]\n\n for value in values:\n self.assertIs(value, t.coerce(value))\n\n import random\n value = random.randint(1, 1e10)\n self.assertIs(value, t.coerce(value))\n value = -value\n self.assertIs(value, t.coerce(value))\n\n def test_coerce_requires_integral_number(self):\n t = xso.Integer()\n\n values = [\n 1.2,\n \"1\",\n decimal.Decimal(\"1\"),\n fractions.Fraction(1, 1),\n \"foo\",\n [],\n (),\n 1.\n ]\n\n for value in values:\n with self.assertRaisesRegex(\n TypeError,\n \"must be integral number\"):\n t.coerce(value)\n\n\nclass TestFloatType(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.Float(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.Float()\n self.assertEqual(\n 123.3,\n t.parse(\"123.3\"))\n\n def test_parse_failure(self):\n t = xso.Float()\n with self.assertRaises(ValueError):\n t.parse(\"123.3f\")\n\n def test_format(self):\n t = xso.Float()\n self.assertEqual(\n \"123.3\",\n t.format(123.3))\n\n def test_coerce_passes_real_numbers(self):\n t = xso.Float()\n\n values = [\n # decimal.Decimal(\"1.23\"),\n fractions.Fraction(1, 9),\n 1.234,\n 20,\n -1,\n -3.4,\n ]\n\n for value in values:\n self.assertEqual(\n float(value),\n t.coerce(value)\n )\n\n def test_coerce_passes_decimal(self):\n t = xso.Float()\n\n values = [\n decimal.Decimal(\"1.23\"),\n ]\n\n for value in values:\n self.assertEqual(\n float(value),\n t.coerce(value)\n )\n\n def test_coerce_requires_float_number(self):\n t = xso.Float()\n\n values = [\n \"foo\",\n [],\n ()\n ]\n\n for value in values:\n with self.assertRaisesRegex(\n TypeError,\n \"must be real number\"):\n t.coerce(value)\n\n\nclass TestBoolType(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.Bool(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.Bool()\n self.assertTrue(t.parse(\"true\"))\n self.assertTrue(t.parse(\"1\"))\n self.assertTrue(t.parse(\" true \"))\n self.assertTrue(t.parse(\" 1 \"))\n 
self.assertFalse(t.parse(\"false\"))\n self.assertFalse(t.parse(\"0\"))\n self.assertFalse(t.parse(\" false \"))\n self.assertFalse(t.parse(\" 0 \"))\n\n def test_parse_failure(self):\n t = xso.Bool()\n with self.assertRaises(ValueError):\n t.parse(\"foobar\")\n with self.assertRaises(ValueError):\n t.parse(\"truefoo\")\n with self.assertRaises(ValueError):\n t.parse(\"0foo\")\n\n def test_format(self):\n t = xso.Bool()\n self.assertEqual(\n \"true\",\n t.format(True))\n self.assertEqual(\n \"false\",\n t.format(False))\n\n def test_coerce_anything(self):\n t = xso.Bool()\n mock = unittest.mock.MagicMock()\n\n result = mock.__bool__()\n mock.reset_mock()\n\n self.assertEqual(\n result,\n t.coerce(mock))\n\n mock.__bool__.assert_called_once_with()\n\n\nclass TestDateTimeType(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.DateTime(),\n xso.AbstractCDataType)\n\n def test_parse_example(self):\n t = xso.DateTime()\n self.assertEqual(\n datetime(2014, 1, 26, 19, 40, 10, tzinfo=pytz.utc),\n t.parse(\"2014-01-26T19:40:10Z\"))\n\n def test_parse_timezoned(self):\n t = xso.DateTime()\n self.assertEqual(\n datetime(2014, 1, 26, 19, 40, 10, tzinfo=pytz.utc),\n t.parse(\"2014-01-26T20:40:10+01:00\"))\n\n def test_parse_local(self):\n t = xso.DateTime()\n self.assertEqual(\n datetime(2014, 1, 26, 20, 40, 10),\n t.parse(\"2014-01-26T20:40:10\"))\n\n def test_parse_milliseconds(self):\n t = xso.DateTime()\n self.assertEqual(\n datetime(2014, 1, 26, 20, 40, 10, 123456),\n t.parse(\"2014-01-26T20:40:10.123456\"))\n\n def test_parse_milliseconds_timezoned(self):\n t = xso.DateTime()\n self.assertEqual(\n datetime(2014, 1, 26, 19, 40, 10, 123456, tzinfo=pytz.utc),\n t.parse(\"2014-01-26T20:40:10.123456+01:00\"))\n\n def test_parse_need_time(self):\n t = xso.DateTime()\n with self.assertRaises(ValueError):\n t.parse(\"2014-01-26\")\n\n def test_parse_need_date(self):\n t = xso.DateTime()\n with self.assertRaises(ValueError):\n t.parse(\"20:40:10\")\n\n def test_format_timezoned(self):\n t = xso.DateTime()\n self.assertEqual(\n \"2014-01-26T19:40:10Z\",\n t.format(datetime(2014, 1, 26, 19, 40, 10, tzinfo=pytz.utc))\n )\n\n def test_format_timezoned_microseconds(self):\n t = xso.DateTime()\n self.assertEqual(\n \"2014-01-26T19:40:10.1234Z\",\n t.format(datetime(2014, 1, 26, 19, 40, 10, 123400,\n tzinfo=pytz.utc))\n )\n\n def test_format_naive(self):\n t = xso.DateTime()\n self.assertEqual(\n \"2014-01-26T19:40:10\",\n t.format(datetime(2014, 1, 26, 19, 40, 10))\n )\n\n def test_format_naive_microseconds(self):\n t = xso.DateTime()\n self.assertEqual(\n \"2014-01-26T19:40:10.1234\",\n t.format(datetime(2014, 1, 26, 19, 40, 10, 123400))\n )\n\n def test_format_timezoned_nonutc(self):\n t = xso.DateTime()\n self.assertEqual(\n \"2014-01-26T19:40:10Z\",\n t.format(pytz.timezone(\"Europe/Berlin\").localize(\n datetime(2014, 1, 26, 20, 40, 10)\n ))\n )\n\n def test_parse_xep0082_examples(self):\n t = xso.DateTime()\n self.assertEqual(\n t.parse(\"1969-07-21T02:56:15Z\"),\n datetime(1969, 7, 21, 2, 56, 15, tzinfo=pytz.utc)\n )\n self.assertEqual(\n t.parse(\"1969-07-20T21:56:15-05:00\"),\n datetime(1969, 7, 21, 2, 56, 15, tzinfo=pytz.utc)\n )\n\n def test_parse_legacy_format(self):\n t = xso.DateTime()\n self.assertEqual(\n t.parse(\"19690721T02:56:15\"),\n datetime(1969, 7, 21, 2, 56, 15, tzinfo=pytz.utc)\n )\n\n def test_emit_legacy_format_with_switch(self):\n t = xso.DateTime(legacy=True)\n self.assertEqual(\n \"19690721T02:56:15\",\n t.format(datetime(1969, 7, 21, 2, 56, 15, 
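[editor's note on the aioxmpp type tests that begin above] the DateTime cases double as usage documentation: timezone offsets are normalised to UTC on parse, and UTC datetimes serialise with a trailing Z. A condensed round-trip lifted straight from the assertions:

import pytz
from datetime import datetime
import aioxmpp.xso as xso

t = xso.DateTime()
dt = t.parse("2014-01-26T20:40:10+01:00")      # +01:00 folded into UTC
assert dt == datetime(2014, 1, 26, 19, 40, 10, tzinfo=pytz.utc)
assert t.format(dt) == "2014-01-26T19:40:10Z"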
tzinfo=pytz.utc))\n )\n self.assertEqual(\n \"20140126T19:40:10\",\n t.format(pytz.timezone(\"Europe/Berlin\").localize(\n datetime(2014, 1, 26, 20, 40, 10)\n ))\n )\n\n def test_require_datetime(self):\n t = xso.DateTime()\n\n values = [\n 1,\n \"foo\",\n \"2014-01-26T19:47:10Z\",\n 12.3,\n ]\n\n for value in values:\n with self.assertRaisesRegex(\n TypeError,\n \"must be a datetime object\"):\n t.coerce(value)\n\n def test_pass_datetime(self):\n t = xso.DateTime()\n\n dt = datetime.utcnow()\n self.assertIs(\n dt,\n t.coerce(dt)\n )\n\n\nclass TestDate(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.Date(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.Date()\n self.assertEqual(\n t.parse(\"1776-07-04\"),\n date(1776, 7, 4),\n )\n\n def test_format(self):\n t = xso.Date()\n self.assertEqual(\n t.format(date(1776, 7, 4)),\n \"1776-07-04\",\n )\n\n def test_coerce_rejects_datetime(self):\n t = xso.Date()\n with self.assertRaisesRegex(\n TypeError,\n \"must be a date object\"):\n t.coerce(datetime.utcnow())\n\n def test_coerce_rejects_time(self):\n t = xso.Date()\n with self.assertRaisesRegex(\n TypeError,\n \"must be a date object\"):\n t.coerce(datetime.utcnow().time())\n\n def test_coerce_accepts_date(self):\n t = xso.Date()\n v = datetime.utcnow().date()\n self.assertEqual(t.coerce(v), v)\n\n\nclass TestTime(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.Time(),\n xso.AbstractCDataType)\n\n def test_parse_example(self):\n t = xso.Time()\n self.assertEqual(\n time(19, 40, 10, tzinfo=pytz.utc),\n t.parse(\"19:40:10Z\"))\n\n def test_parse_timezoned(self):\n t = xso.Time()\n self.assertEqual(\n time(19, 40, 10, tzinfo=pytz.utc),\n t.parse(\"20:40:10+01:00\"))\n\n def test_parse_local(self):\n t = xso.Time()\n self.assertEqual(\n time(20, 40, 10),\n t.parse(\"20:40:10\"))\n\n def test_parse_milliseconds(self):\n t = xso.Time()\n self.assertEqual(\n time(20, 40, 10, 123456),\n t.parse(\"20:40:10.123456\"))\n\n def test_parse_milliseconds_timezoned(self):\n t = xso.Time()\n self.assertEqual(\n time(19, 40, 10, 123456, tzinfo=pytz.utc),\n t.parse(\"20:40:10.123456+01:00\"))\n\n def test_format_timezoned(self):\n t = xso.Time()\n self.assertEqual(\n \"19:40:10Z\",\n t.format(time(19, 40, 10, tzinfo=pytz.utc))\n )\n\n def test_format_timezoned_microseconds(self):\n t = xso.Time()\n self.assertEqual(\n \"19:40:10.1234Z\",\n t.format(time(19, 40, 10, 123400,\n tzinfo=pytz.utc))\n )\n\n def test_format_naive(self):\n t = xso.Time()\n self.assertEqual(\n \"19:40:10\",\n t.format(time(19, 40, 10))\n )\n\n def test_format_naive_microseconds(self):\n t = xso.Time()\n self.assertEqual(\n \"19:40:10.1234\",\n t.format(time(19, 40, 10, 123400))\n )\n\n def test_coerce_rejects_non_utc_timezone(self):\n t = xso.Time()\n with self.assertRaisesRegex(\n ValueError,\n \"time must have UTC timezone or none at all\"):\n t.coerce(pytz.timezone(\"Europe/Berlin\").localize(\n datetime(2014, 1, 26, 20, 40, 10)\n ).timetz())\n\n def test_coerce_accepts_naive_timezone(self):\n t = xso.Time()\n v = time(20, 40, 10)\n result = t.coerce(v)\n self.assertEqual(v, result)\n\n def test_coerce_accepts_utc_timezone(self):\n t = xso.Time()\n v = time(20, 40, 10, tzinfo=pytz.utc)\n result = t.coerce(v)\n self.assertEqual(v, result)\n\n def test_coerce_rejects_datetime(self):\n t = xso.Time()\n with self.assertRaisesRegex(\n TypeError,\n \"must be a time object\"):\n t.coerce(datetime.utcnow())\n\n def test_coerce_rejects_date(self):\n t = 
xso.Time()\n with self.assertRaisesRegex(\n TypeError,\n \"must be a time object\"):\n t.coerce(datetime.utcnow().date())\n\n\nclass TestBase64Binary(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.Base64Binary(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.Base64Binary()\n self.assertEqual(\n b\"fnord\",\n t.parse(\"Zm5vcmQ=\")\n )\n\n def test_parse_empty(self):\n t = xso.Base64Binary()\n self.assertEqual(\n b\"\",\n t.parse(\"\")\n )\n self.assertEqual(\n b\"\",\n t.parse(\"=\")\n )\n\n def test_format(self):\n t = xso.Base64Binary()\n self.assertEqual(\n \"Zm5vcmQ=\",\n t.format(b\"fnord\")\n )\n\n def test_format_empty_default(self):\n t = xso.Base64Binary()\n self.assertEqual(\n \"\",\n t.format(b\"\")\n )\n\n def test_format_empty_with_empty_as_equal_flag(self):\n t = xso.Base64Binary(empty_as_equal=True)\n self.assertEqual(\n \"=\",\n t.format(b\"\")\n )\n\n def test_format_long(self):\n t = xso.Base64Binary()\n self.assertEqual(\n \"Zm5vcmRmbm9yZGZub3JkZm5vcmRmbm9yZGZub3JkZm5vcmRmbm9yZG\"\n \"Zub3JkZm5vcmRmbm9yZGZub3JkZm5vcmRmbm9yZGZub3JkZm5vcmRm\"\n \"bm9yZGZub3JkZm5vcmRmbm9yZA==\",\n t.format(b\"fnord\"*20)\n )\n\n def test_coerce_rejects_int(self):\n t = xso.Base64Binary()\n with self.assertRaisesRegex(TypeError,\n \"must be convertible to bytes\"):\n t.coerce(12)\n\n def test_coerce_accepts_bytes_bytearray_array(self):\n t = xso.Base64Binary()\n\n import array\n array_value = array.array(\"h\")\n array_value.append(1234)\n array_value.append(5678)\n array_value.append(910)\n\n values = [\n b\"foobar\",\n bytearray(b\"baz\"),\n ]\n\n for value in values:\n result = t.coerce(value)\n self.assertEqual(\n bytes(value),\n result\n )\n self.assertIsInstance(\n result,\n bytes\n )\n\n def test_coerce_passes_bytes(self):\n t = xso.Base64Binary()\n\n value = b\"foo\"\n\n self.assertIs(\n value,\n t.coerce(value)\n )\n\n\nclass TestHexBinary(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.HexBinary(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.HexBinary()\n self.assertEqual(\n b\"fnord\",\n t.parse(\"666e6f7264\")\n )\n\n def test_format(self):\n t = xso.HexBinary()\n self.assertEqual(\n \"666e6f7264\",\n t.format(b\"fnord\")\n )\n\n def test_coerce_rejects_int(self):\n t = xso.HexBinary()\n with self.assertRaisesRegex(TypeError,\n \"must be convertible to bytes\"):\n t.coerce(12)\n\n def test_coerce_accepts_bytes_bytearray_array(self):\n t = xso.HexBinary()\n\n import array\n array_value = array.array(\"h\")\n array_value.append(1234)\n array_value.append(5678)\n array_value.append(910)\n\n values = [\n b\"foobar\",\n bytearray(b\"baz\"),\n ]\n\n for value in values:\n result = t.coerce(value)\n self.assertEqual(\n bytes(value),\n result\n )\n self.assertIsInstance(\n result,\n bytes\n )\n\n def test_coerce_passes_bytes(self):\n t = xso.HexBinary()\n\n value = b\"foo\"\n\n self.assertIs(\n value,\n t.coerce(value)\n )\n\n\nclass TestJID(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.JID(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.JID()\n self.assertEqual(\n structs.JID(\"foo\", \"example.test\", \"bar\"),\n t.parse(\"foo@example.test/bar\")\n )\n\n def test_parse_uses_nonstrict_by_default(self):\n with unittest.mock.patch(\"aioxmpp.structs.JID\") as JID:\n t = xso.JID()\n result = t.parse(unittest.mock.sentinel.jidstr)\n\n JID.fromstr.assert_called_with(\n unittest.mock.sentinel.jidstr,\n strict=False\n )\n\n 
self.assertEqual(result, JID.fromstr())\n\n def test_parse_can_be_set_to_strict(self):\n with unittest.mock.patch(\"aioxmpp.structs.JID\") as JID:\n t = xso.JID(strict=True)\n result = t.parse(unittest.mock.sentinel.jidstr)\n\n JID.fromstr.assert_called_with(\n unittest.mock.sentinel.jidstr,\n strict=True\n )\n\n self.assertEqual(result, JID.fromstr())\n\n def test_format(self):\n t = xso.JID()\n self.assertEqual(\n \"ssa@ix.test/IX\",\n t.format(structs.JID(\"ßA\", \"IX.test\", \"\\u2168\"))\n )\n\n def test_coerce_rejects_non_jids(self):\n t = xso.JID()\n types = [str, int, float, object]\n for type_ in types:\n with self.assertRaisesRegex(TypeError,\n \"not a JID\"):\n t.coerce(type_())\n\n def test_coerce_rejects_str_jids(self):\n t = xso.JID()\n with self.assertRaisesRegex(\n TypeError,\n \" object 'foo@bar' is not a JID\"):\n t.coerce(\"foo@bar\")\n\n def test_coerce_passes_jid(self):\n t = xso.JID()\n\n values = [\n structs.JID.fromstr(\"foo@bar.example\"),\n structs.JID.fromstr(\"bar.example\"),\n structs.JID.fromstr(\"foo@bar.example/baz\"),\n ]\n\n for value in values:\n self.assertIs(\n value,\n t.coerce(value)\n )\n\n\nclass TestConnectionLocation(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.ConnectionLocation(),\n xso.AbstractCDataType)\n\n def test_parse_ipv6(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n (ipaddress.IPv6Address(\"fe80::\"), 5222),\n t.parse(\"[fe80::]:5222\")\n )\n\n def test_parse_ipv6_without_port_number(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n (ipaddress.IPv6Address(\"fe80::\"), 5222),\n t.parse(\"[fe80::]\")\n )\n\n def test_invalid_ipv6(self):\n t = xso.ConnectionLocation()\n with self.assertRaises(ValueError):\n t.parse(\"fe80:::5222\")\n\n\n def test_reject_non_integer_port_number(self):\n t = xso.ConnectionLocation()\n with self.assertRaises(ValueError):\n t.parse(\"[fe80::]:23.4\")\n\n def test_reject_out_of_range_port_number(self):\n t = xso.ConnectionLocation()\n with self.assertRaises(ValueError):\n t.parse(\"[fe80::]:1000000\")\n\n def test_parse_ipv4(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n (ipaddress.IPv4Address(\"10.0.0.1\"), 5223),\n t.parse(\"10.0.0.1:5223\")\n )\n\n def test_parse_ipv4_without_port_number(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n (ipaddress.IPv4Address(\"10.0.0.1\"), 5222),\n t.parse(\"10.0.0.1\")\n )\n\n def test_parse_hostname(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n (\"foo.bar.example\", 5234),\n t.parse(\"foo.bar.example:5234\")\n )\n\n def test_parse_hostname_without_port_number(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n (\"foo.bar.example\", 5222),\n t.parse(\"foo.bar.example\")\n )\n\n def test_format_ipv6(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n \"[fe80::]:5222\",\n t.format((ipaddress.IPv6Address(\"fe80::\"), 5222))\n )\n\n def test_format_ipv4(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n \"10.0.0.1:1234\",\n t.format((ipaddress.IPv4Address(\"10.0.0.1\"), 1234))\n )\n\n def test_format_hostname(self):\n t = xso.ConnectionLocation()\n self.assertEqual(\n \"foo.bar.baz:5234\",\n t.format((\"foo.bar.baz\", 5234))\n )\n\n def test_coerce_rejects_non_2tuples(self):\n t = xso.ConnectionLocation()\n\n values = [\n [\"foo\", 1234],\n {\"foo\", 1234},\n (\"foo\", 1234, \"bar\")\n ]\n\n for value in values:\n with self.assertRaisesRegex(TypeError,\n \"2-tuple required\"):\n t.coerce(value)\n\n def test_coerce_parses_ip_addresses(self):\n t = 
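[editor's note on the ConnectionLocation tests above] per the assertions, the type parses "host:port", "ip:port", and "[ipv6]:port" forms and defaults the port to 5222 when it is omitted. A condensed usage sketch drawn from those cases:

import ipaddress
import aioxmpp.xso as xso

t = xso.ConnectionLocation()
assert t.parse("[fe80::]:5222") == (ipaddress.IPv6Address("fe80::"), 5222)
assert t.parse("10.0.0.1") == (ipaddress.IPv4Address("10.0.0.1"), 5222)
assert t.parse("foo.bar.example:5234") == ("foo.bar.example", 5234)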
xso.ConnectionLocation()\n\n value_pairs = [\n ((\"10.0.0.1\", 1234), (ipaddress.IPv4Address(\"10.0.0.1\"), 1234)),\n ((\"fe80::\", 1234), (ipaddress.IPv6Address(\"fe80::\"), 1234)),\n ((\"10.0.foobar\", 1234), (\"10.0.foobar\", 1234)),\n ]\n\n for given, expected in value_pairs:\n self.assertEqual(\n expected,\n t.coerce(given)\n )\n\n def test_coerce_restricts_port_numbers(self):\n t = xso.ConnectionLocation()\n\n err_values = [\n (\"foobar\", -1),\n (\"foobar\", 65536),\n ]\n\n for err_value in err_values:\n with self.assertRaisesRegex(ValueError, \"out of range\"):\n t.coerce(err_value)\n\n ok_values = [\n (\"foobar\", 0),\n (\"foobar\", 65535),\n ]\n\n for ok_value in ok_values:\n self.assertEqual(\n ok_value,\n t.coerce(ok_value)\n )\n\n def test_coerce_requires_integral_number(self):\n t = xso.ConnectionLocation()\n\n values = [\n (\"foobar\", 1.2),\n (\"foobar\", \"1\"),\n (\"foobar\", decimal.Decimal(\"1\")),\n (\"foobar\", fractions.Fraction(1, 1)),\n ]\n\n for value in values:\n with self.assertRaisesRegex(\n TypeError,\n \"port number must be integral number\"):\n t.coerce(value)\n\n\nclass TestLanguageTag(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertIsInstance(\n xso.LanguageTag(),\n xso.AbstractCDataType)\n\n def test_parse(self):\n t = xso.LanguageTag()\n self.assertEqual(\n structs.LanguageTag.fromstr(\"de-Latn-DE-1999\"),\n t.parse(\"de-Latn-DE-1999\")\n )\n\n def test_format(self):\n t = xso.LanguageTag()\n self.assertEqual(\n \"de-Latn-DE-1999\",\n t.format(structs.LanguageTag.fromstr(\"de-Latn-DE-1999\"))\n )\n\n def test_coerce_passes_language_tags(self):\n t = xso.LanguageTag()\n tag = structs.LanguageTag.fromstr(\"foo\")\n self.assertIs(\n tag,\n t.coerce(tag)\n )\n\n def test_coerce_rejects_non_language_tags(self):\n t = xso.LanguageTag()\n\n values = [\n 1.2,\n decimal.Decimal(\"1\"),\n fractions.Fraction(1, 1),\n [],\n (),\n 1.,\n \"foo\",\n ]\n\n for value in values:\n with self.assertRaisesRegex(\n TypeError,\n \"is not a LanguageTag\"):\n t.coerce(value)\n\n\nclass TestJSON(unittest.TestCase):\n def test_is_cdata_type(self):\n self.assertTrue(issubclass(\n xso.JSON,\n xso.AbstractCDataType,\n ))\n\n def test_parse_loads_as_json_via_instance(self):\n j = xso.JSON()\n\n with contextlib.ExitStack() as stack:\n loads = stack.enter_context(unittest.mock.patch(\n \"json.loads\",\n return_value=unittest.mock.sentinel.parsed,\n ))\n\n result = j.parse(\n unittest.mock.sentinel.cdata,\n )\n\n loads.assert_called_once_with(unittest.mock.sentinel.cdata)\n\n self.assertEqual(result, unittest.mock.sentinel.parsed)\n\n def test_format_dumps_as_json_via_instance(self):\n j = xso.JSON()\n\n with contextlib.ExitStack() as stack:\n dumps = stack.enter_context(unittest.mock.patch(\n \"json.dumps\",\n return_value=unittest.mock.sentinel.serialised,\n ))\n\n result = j.format(\n unittest.mock.sentinel.data,\n )\n\n dumps.assert_called_once_with(unittest.mock.sentinel.data)\n\n self.assertEqual(result, unittest.mock.sentinel.serialised)\n\n def test_coerce_passes_everything_via_instance(self):\n value = object()\n\n self.assertIs(\n xso.JSON().coerce(value),\n value,\n )\n\n\nclass TestTextChildMap(unittest.TestCase):\n def test_is_element_type(self):\n self.assertTrue(issubclass(\n xso.TextChildMap,\n xso.AbstractElementType\n ))\n\n def setUp(self):\n self.type_ = xso.TextChildMap(xso.AbstractTextChild)\n\n def tearDown(self):\n del self.type_\n\n def test_get_xso_types(self):\n self.assertCountEqual(\n self.type_.get_xso_types(),\n [xso.AbstractTextChild]\n 
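The ConnectionLocation tests above describe a small textual protocol: bracketed IPv6 literals, bare IPv4 addresses or hostnames, an optional port, and 5222 as the default. A hypothetical re-implementation of just that behaviour; the real type may differ in details (for instance, this sketch does not reject unbracketed IPv6 such as "fe80:::5222"):

import ipaddress

DEFAULT_PORT = 5222  # the fallback the tests expect

def parse_location(s):
    if s.startswith("["):                        # "[fe80::]:5222" or "[fe80::]"
        hostpart, _, rest = s[1:].partition("]")
        host = ipaddress.IPv6Address(hostpart)
        port = int(rest[1:]) if rest.startswith(":") else DEFAULT_PORT
    else:                                        # "10.0.0.1:5223", "host:5234", "host"
        hostpart, sep, portstr = s.rpartition(":")
        if not sep:
            hostpart, portstr = s, ""
        port = int(portstr) if portstr else DEFAULT_PORT
        try:
            host = ipaddress.ip_address(hostpart)
        except ValueError:
            host = hostpart                      # plain hostname
    if not 0 <= port <= 65535:
        raise ValueError("port number out of range")
    return host, port

assert parse_location("[fe80::]") == (ipaddress.IPv6Address("fe80::"), 5222)
assert parse_location("foo.bar.example:5234") == ("foo.bar.example", 5234)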
)\n\n def test_unpack(self):\n text, lang = \"foo\", structs.LanguageTag.fromstr(\"en-gb\")\n item = xso.AbstractTextChild(text, lang)\n self.assertEqual(\n (lang, text),\n self.type_.unpack(item)\n )\n\n def test_pack(self):\n text, lang = \"foo\", structs.LanguageTag.fromstr(\"en-gb\")\n item = self.type_.pack((lang, text))\n\n self.assertEqual(item.text, text)\n self.assertEqual(item.lang, lang)\n\n\nclass TestEnumCDataType(unittest.TestCase):\n class SomeEnum(Enum):\n X = 1\n Y = 2\n Z = 3\n\n class SomeIntEnum(IntEnum):\n X = 1\n Y = 2\n Z = 3\n\n def test_is_cdata_type(self):\n self.assertTrue(issubclass(\n xso.EnumCDataType,\n xso.AbstractCDataType,\n ))\n\n def test_init_default(self):\n with self.assertRaises(TypeError):\n xso.EnumCDataType()\n\n def test_init_with_enum(self):\n e = xso.EnumCDataType(self.SomeEnum)\n self.assertIs(\n e.enum_class,\n self.SomeEnum\n )\n self.assertIsInstance(\n e.nested_type,\n xso.String,\n )\n\n def test_init_with_custom_nested_type(self):\n e = xso.EnumCDataType(\n self.SomeEnum,\n nested_type=unittest.mock.sentinel.nested_type\n )\n self.assertIs(\n e.enum_class,\n self.SomeEnum\n )\n self.assertIs(\n e.nested_type,\n unittest.mock.sentinel.nested_type,\n )\n\n def test_parse_uses_enum_and_nested_type(self):\n enum_class = unittest.mock.Mock()\n nested_type = unittest.mock.Mock()\n e = xso.EnumCDataType(enum_class, nested_type)\n\n result = e.parse(unittest.mock.sentinel.value)\n\n nested_type.parse.assert_called_with(\n unittest.mock.sentinel.value,\n )\n\n enum_class.assert_called_with(\n nested_type.parse(),\n )\n\n self.assertEqual(\n result,\n enum_class(),\n )\n\n def test_parse_works_with_actual_enum(self):\n e = xso.EnumCDataType(self.SomeEnum, xso.Integer())\n for enum_value in self.SomeEnum:\n self.assertEqual(\n e.parse(str(enum_value.value)),\n enum_value,\n )\n\n def test_format_uses_enum_value_and_nested_type(self):\n enum_class = unittest.mock.Mock()\n enum_value = unittest.mock.Mock()\n nested_type = unittest.mock.Mock()\n e = xso.EnumCDataType(enum_class, nested_type)\n\n result = e.format(enum_value)\n\n nested_type.format.assert_called_with(\n enum_value.value,\n )\n\n self.assertEqual(\n result,\n nested_type.format(),\n )\n\n def test_format_works_with_actual_enums(self):\n e = xso.EnumCDataType(self.SomeEnum, xso.Integer())\n for enum_value in self.SomeEnum:\n self.assertEqual(\n e.format(enum_value),\n str(enum_value.value),\n )\n\n def test_get_formatted_type_not_implemented(self):\n self.assertFalse(\n hasattr(xso.EnumCDataType, \"get_formatted_type\")\n )\n\n def test_get_xso_types_not_implemented(self):\n self.assertFalse(\n hasattr(xso.EnumCDataType, \"get_xso_types\")\n )\n\n def test_pass_Enum_values_through_coerce(self):\n e = xso.EnumCDataType(self.SomeEnum)\n for enum_value in self.SomeEnum:\n self.assertIs(enum_value, e.coerce(enum_value))\n\n def test_reject_non_Enum_values_on_coerce(self):\n wrong = [\n 1,\n \"10\",\n 10.2,\n object()\n ]\n\n e = xso.EnumCDataType(self.SomeEnum)\n\n for thing in wrong:\n with self.assertRaises(TypeError):\n e.coerce(thing)\n\n def test_try_to_coerce_if_allow_coerce_is_set(self):\n nested_t = xso.Integer()\n t = xso.EnumCDataType(\n self.SomeEnum,\n nested_t,\n allow_coerce=True,\n )\n\n with contextlib.ExitStack() as stack:\n enum_constructor = stack.enter_context(unittest.mock.patch.object(\n type(self.SomeEnum), \"__call__\"\n ))\n enum_constructor.return_value = unittest.mock.sentinel.wrapped\n\n w = stack.enter_context(warnings.catch_warnings())\n coerce = 
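The EnumCDataType tests above exercise a contract that is easy to state in isolation: parse delegates to the nested type and wraps the result in the enum; format unwraps .value and delegates back. A toy version, with a stand-in for xso.Integer():

import enum

class Color(enum.Enum):
    RED = 1
    BLUE = 2

class IntCData:                       # stand-in for xso.Integer()
    def parse(self, s): return int(s)
    def format(self, v): return str(v)

class MiniEnumCDataType:
    # parse: decode the character data with the nested type, wrap in the enum;
    # format: unwrap the member's .value, encode with the nested type
    def __init__(self, enum_class, nested):
        self.enum_class = enum_class
        self.nested = nested
    def parse(self, s):
        return self.enum_class(self.nested.parse(s))
    def format(self, member):
        return self.nested.format(member.value)

t = MiniEnumCDataType(Color, IntCData())
assert t.parse("1") is Color.RED
assert t.format(Color.BLUE) == "2"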
stack.enter_context(\n unittest.mock.patch.object(nested_t, \"coerce\")\n )\n\n coerce.return_value = unittest.mock.sentinel.coerced\n result = t.coerce(unittest.mock.sentinel.value)\n\n enum_constructor.assert_called_with(\n unittest.mock.sentinel.coerced,\n )\n\n self.assertEqual(\n result,\n unittest.mock.sentinel.wrapped,\n )\n\n self.assertFalse(w)\n\n def test_pass_Enum_values_through_coerce_if_coercion_is_allowed(self):\n e = xso.EnumCDataType(self.SomeEnum, allow_coerce=True)\n for enum_value in self.SomeEnum:\n self.assertIs(enum_value, e.coerce(enum_value))\n\n def test_value_error_propagates(self):\n exc = ValueError()\n\n with contextlib.ExitStack() as stack:\n enum_constructor = stack.enter_context(unittest.mock.patch.object(\n type(self.SomeEnum), \"__call__\"\n ))\n enum_constructor.side_effect = exc\n e = xso.EnumCDataType(self.SomeEnum,\n xso.Integer(),\n allow_coerce=True)\n\n with self.assertRaises(ValueError) as ctx:\n e.coerce(1234)\n\n self.assertIs(ctx.exception, exc)\n\n def test_deprecate_coerce(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n allow_coerce=True,\n deprecate_coerce=True,\n )\n\n with contextlib.ExitStack() as stack:\n warn = stack.enter_context(\n unittest.mock.patch(\n \"warnings.warn\",\n )\n )\n\n result = e.coerce(1)\n\n warn.assert_called_with(\n \"assignment of non-enum values to this descriptor is deprecated\",\n DeprecationWarning,\n stacklevel=4\n )\n\n self.assertEqual(\n result,\n enum_class(1),\n )\n\n def test_deprecate_coerce_custom_stacklevel(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n allow_coerce=True,\n deprecate_coerce=unittest.mock.sentinel.stacklevel,\n )\n\n with contextlib.ExitStack() as stack:\n warn = stack.enter_context(\n unittest.mock.patch(\n \"warnings.warn\",\n )\n )\n\n result = e.coerce(1)\n\n warn.assert_called_with(\n \"assignment of non-enum values to this descriptor is deprecated\",\n DeprecationWarning,\n stacklevel=unittest.mock.sentinel.stacklevel\n )\n\n self.assertEqual(\n result,\n enum_class(1),\n )\n\n def test_deprecate_coerce_does_not_emit_warning_for_enum_value(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n allow_coerce=True,\n deprecate_coerce=True,\n )\n\n value = enum_class.X\n\n with contextlib.ExitStack() as stack:\n warn = stack.enter_context(\n unittest.mock.patch(\n \"warnings.warn\",\n )\n )\n\n result = e.coerce(value)\n\n self.assertFalse(warn.mock_calls)\n\n self.assertIs(\n value,\n result,\n )\n\n def test_accept_unknown_by_default(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n )\n\n value = e.coerce(xso.Unknown(10))\n self.assertIsInstance(value, xso.Unknown)\n self.assertEqual(xso.Unknown(10), value)\n\n def test_accept_unknown_can_be_turned_off(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n accept_unknown=False,\n )\n\n with self.assertRaisesRegex(\n TypeError,\n r\"not a valid .* value: \"):\n e.coerce(xso.Unknown(10))\n\n def test_allow_unknown_by_default(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n )\n\n value = e.parse(\"10\")\n self.assertIsInstance(value, xso.Unknown)\n self.assertEqual(xso.Unknown(10), value)\n\n def test_allow_unknown_can_be_turned_off(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n allow_unknown=False,\n )\n\n with 
self.assertRaisesRegex(\n ValueError,\n r\"10 is not a valid .*SomeEnum\"):\n e.parse(10)\n\n def test_format_works_with_unknown(self):\n enum_class = self.SomeEnum\n e = xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n )\n\n self.assertEqual(\n e.format(xso.Unknown(10)),\n \"10\",\n )\n\n def test_reject_pass_unknown_without_allow_unknown(self):\n enum_class = self.SomeEnum\n\n with self.assertRaisesRegex(\n ValueError,\n r\"pass_unknown requires allow_unknown and accept_unknown\"):\n xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n allow_unknown=False,\n pass_unknown=True,\n )\n\n def test_reject_pass_unknown_without_accept_unknown(self):\n enum_class = self.SomeEnum\n\n with self.assertRaisesRegex(\n ValueError,\n r\"pass_unknown requires allow_unknown and accept_unknown\"):\n xso.EnumCDataType(\n enum_class,\n xso.Integer(),\n allow_unknown=False,\n pass_unknown=True,\n )\n\n def test_coerce_passes_non_members_with_pass_unknown(self):\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n xso.Integer(),\n pass_unknown=True,\n )\n\n v = t.coerce(10)\n self.assertFalse(isinstance(v, IntEnum))\n\n def test_coerce_passes_value_to_nested_type_coerce_with_pass_unknown(self):\n nested_t = xso.Integer()\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n nested_t,\n pass_unknown=True,\n )\n\n with unittest.mock.patch.object(nested_t, \"coerce\") as coerce:\n coerce.return_value = unittest.mock.sentinel.coerced\n\n v = t.coerce(unittest.mock.sentinel.value)\n\n self.assertEqual(v, unittest.mock.sentinel.coerced)\n\n def test_coerce_converts_to_enum_members_if_allow_coerce_is_set(self):\n nested_t = xso.Integer()\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n nested_t,\n allow_coerce=True,\n pass_unknown=True,\n )\n\n with unittest.mock.patch.object(nested_t, \"coerce\") as coerce:\n coerce.return_value = 2\n\n v = t.coerce(unittest.mock.sentinel.value)\n\n self.assertEqual(v, 2)\n self.assertIsInstance(v, self.SomeIntEnum)\n\n def test_coerce_does_not_convert_to_enum_members_if_allow_coerce_unset(self): # NOQA\n nested_t = xso.Integer()\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n nested_t,\n allow_coerce=False,\n pass_unknown=True,\n )\n\n with unittest.mock.patch.object(nested_t, \"coerce\") as coerce:\n coerce.return_value = 2\n\n v = t.coerce(unittest.mock.sentinel.value)\n\n self.assertEqual(v, 2)\n self.assertFalse(isinstance(v, self.SomeIntEnum))\n\n def test_coerce_passes_non_members_with_pass_unknown_and_allow_coerce(self):\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n xso.Integer(),\n allow_coerce=True,\n pass_unknown=True,\n )\n\n v = t.coerce(10)\n self.assertFalse(isinstance(v, IntEnum))\n\n def test_parse_passes_unwrapped_value_if_pass_unknown(self):\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n xso.Integer(),\n pass_unknown=True,\n )\n\n v = t.parse(\"10\")\n self.assertEqual(v, 10)\n self.assertFalse(isinstance(v, xso.Unknown))\n\n def test_format_works_with_unwrapped_unknowns_if_pass_unknown(self):\n t = xso.EnumCDataType(\n self.SomeIntEnum,\n xso.Integer(),\n pass_unknown=True,\n )\n\n v = t.format(10)\n self.assertEqual(v, \"10\")\n\n\nclass TestEnumElementType(unittest.TestCase):\n class SomeEnum(Enum):\n X = 1\n Y = 2\n Z = 3\n\n class FancyType(xso.AbstractElementType):\n def get_xso_types(self):\n raise NotImplementedError\n\n def pack(self, obj):\n return obj\n\n def unpack(self, obj):\n return obj\n\n def test_is_element_type(self):\n self.assertTrue(issubclass(\n xso.EnumElementType,\n xso.AbstractElementType,\n ))\n\n def test_init_default(self):\n with 
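The allow_unknown / pass_unknown tests above describe a three-way fallback for wire values that are not enum members. A condensed sketch of that decision; Unknown here is a minimal stand-in for xso.Unknown, and the flag semantics are inferred from the tests rather than taken from documentation:

class Unknown:
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return isinstance(other, Unknown) and other.value == self.value

def parse_enum(enum_class, raw, allow_unknown=True, pass_unknown=False):
    try:
        return enum_class(raw)
    except ValueError:
        if pass_unknown:
            return raw               # hand the plain value through unwrapped
        if allow_unknown:
            return Unknown(raw)      # wrap it so it can still round-trip
        raise                        # strict mode: surface the ValueError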
self.assertRaises(TypeError):\n xso.EnumElementType()\n\n def test_init_with_enum(self):\n with self.assertRaises(TypeError):\n xso.EnumElementType(self.SomeEnum)\n\n def test_init_with_custom_nested_type(self):\n e = xso.EnumElementType(\n self.SomeEnum,\n nested_type=unittest.mock.sentinel.nested_type\n )\n self.assertIs(\n e.enum_class,\n self.SomeEnum\n )\n self.assertIs(\n e.nested_type,\n unittest.mock.sentinel.nested_type,\n )\n\n def test_unpack_uses_enum_and_nested_type(self):\n enum_class = unittest.mock.Mock()\n nested_type = unittest.mock.Mock()\n e = xso.EnumElementType(enum_class, nested_type)\n\n result = e.unpack(unittest.mock.sentinel.value)\n\n nested_type.unpack.assert_called_with(\n unittest.mock.sentinel.value,\n )\n\n enum_class.assert_called_with(\n nested_type.unpack(),\n )\n\n self.assertEqual(\n result,\n enum_class(),\n )\n\n def test_unpack_works_with_actual_enum(self):\n e = xso.EnumElementType(self.SomeEnum, self.FancyType())\n for enum_value in self.SomeEnum:\n self.assertEqual(\n e.unpack(enum_value.value),\n enum_value,\n )\n\n def test_pack_uses_enum_value_and_nested_type(self):\n enum_class = unittest.mock.Mock()\n enum_value = unittest.mock.Mock()\n nested_type = unittest.mock.Mock()\n e = xso.EnumElementType(enum_class, nested_type)\n\n result = e.pack(enum_value)\n\n nested_type.pack.assert_called_with(\n enum_value.value,\n )\n\n self.assertEqual(\n result,\n nested_type.pack(),\n )\n\n def test_pack_works_with_actual_enums(self):\n e = xso.EnumElementType(self.SomeEnum, self.FancyType())\n for enum_value in self.SomeEnum:\n self.assertEqual(\n e.pack(enum_value),\n enum_value.value,\n )\n\n def test_get_xso_types_delegates_to_nested_type(self):\n nested_type = unittest.mock.Mock()\n e = xso.EnumElementType(\n unittest.mock.sentinel.enum_class,\n nested_type,\n )\n\n result = e.get_xso_types()\n nested_type.get_xso_types.assert_called_with()\n self.assertEqual(\n result,\n nested_type.get_xso_types(),\n )\n\n def test_pass_Enum_values_through_coerce(self):\n e = xso.EnumElementType(self.SomeEnum, self.FancyType())\n for enum_value in self.SomeEnum:\n self.assertIs(enum_value, e.coerce(enum_value))\n\n def test_reject_non_Enum_values_on_coerce(self):\n wrong = [\n 1,\n \"10\",\n 10.2,\n object()\n ]\n\n e = xso.EnumElementType(self.SomeEnum, self.FancyType())\n\n for thing in wrong:\n with self.assertRaises(TypeError):\n e.coerce(thing)\n\n def test_try_to_coerce_if_allow_coerce_is_set(self):\n enum_class = unittest.mock.Mock()\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n allow_coerce=True,\n )\n\n with warnings.catch_warnings() as w:\n result = e.coerce(unittest.mock.sentinel.value)\n\n enum_class.assert_called_with(\n unittest.mock.sentinel.value,\n )\n\n self.assertEqual(\n result,\n enum_class(),\n )\n\n self.assertFalse(w)\n\n def test_value_error_propagates(self):\n exc = ValueError()\n\n enum_class = unittest.mock.Mock()\n enum_class.side_effect = exc\n e = xso.EnumElementType(enum_class, self.FancyType(),\n allow_coerce=True)\n\n with self.assertRaises(ValueError) as ctx:\n e.coerce(unittest.mock.sentinel.value)\n\n self.assertIs(ctx.exception, exc)\n\n def test_deprecate_coerce(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n allow_coerce=True,\n deprecate_coerce=True,\n )\n\n with contextlib.ExitStack() as stack:\n warn = stack.enter_context(\n unittest.mock.patch(\n \"warnings.warn\",\n )\n )\n\n result = e.coerce(1)\n\n warn.assert_called_with(\n \"assignment of non-enum 
values to this descriptor is deprecated\",\n DeprecationWarning,\n stacklevel=4\n )\n\n self.assertEqual(\n result,\n enum_class(1),\n )\n\n def test_deprecate_coerce_custom_stacklevel(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n allow_coerce=True,\n deprecate_coerce=unittest.mock.sentinel.stacklevel,\n )\n\n with contextlib.ExitStack() as stack:\n warn = stack.enter_context(\n unittest.mock.patch(\n \"warnings.warn\",\n )\n )\n\n result = e.coerce(1)\n\n warn.assert_called_with(\n \"assignment of non-enum values to this descriptor is deprecated\",\n DeprecationWarning,\n stacklevel=unittest.mock.sentinel.stacklevel\n )\n\n self.assertEqual(\n result,\n enum_class(1),\n )\n\n def test_deprecate_coerce_does_not_emit_warning_for_enum_value(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n allow_coerce=True,\n deprecate_coerce=True,\n )\n\n value = enum_class.X\n\n with contextlib.ExitStack() as stack:\n warn = stack.enter_context(\n unittest.mock.patch(\n \"warnings.warn\",\n )\n )\n\n result = e.coerce(value)\n\n self.assertFalse(warn.mock_calls)\n\n self.assertIs(\n value,\n result,\n )\n\n def test_accept_unknown_by_default(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n xso.Integer(),\n )\n\n value = e.coerce(xso.Unknown(10))\n self.assertIsInstance(value, xso.Unknown)\n self.assertEqual(xso.Unknown(10), value)\n\n def test_accept_unknown_can_be_turned_off(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n xso.Integer(),\n accept_unknown=False,\n )\n\n with self.assertRaisesRegex(\n TypeError,\n r\"not a valid .* value: \"):\n e.coerce(xso.Unknown(10))\n\n def test_allow_unknown_by_default(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n )\n\n value = e.unpack(10)\n self.assertIsInstance(value, xso.Unknown)\n self.assertEqual(xso.Unknown(10), value)\n\n def test_allow_unknown_can_be_turned_off(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n allow_unknown=False,\n )\n\n with self.assertRaisesRegex(\n ValueError,\n r\"10 is not a valid .*SomeEnum\"):\n e.unpack(10)\n\n def test_format_works_with_unknown(self):\n enum_class = self.SomeEnum\n e = xso.EnumElementType(\n enum_class,\n self.FancyType(),\n )\n\n self.assertEqual(\n e.pack(xso.Unknown(10)),\n 10,\n )\n\n\nclass TestAbstractValidator(unittest.TestCase):\n def test_is_abstract(self):\n self.assertIsInstance(\n xso.AbstractValidator,\n abc.ABCMeta)\n with self.assertRaises(TypeError):\n xso.AbstractValidator()\n\n def test_validate_calls_validate_detailed(self):\n class FakeSubclass(xso.AbstractValidator):\n def validate_detailed(self, value):\n pass\n\n instance = FakeSubclass()\n obj = object()\n with unittest.mock.patch.object(instance, \"validate_detailed\") \\\n as validate_detailed:\n instance.validate(obj)\n\n self.assertSequenceEqual(\n [\n unittest.mock.call(obj),\n unittest.mock.call().__bool__(),\n ],\n validate_detailed.mock_calls\n )\n\n def test_validate_calls_validate_detailed_and_inverts_result(self):\n class FakeSubclass(xso.AbstractValidator):\n def validate_detailed(self, value):\n return []\n\n instance = FakeSubclass()\n obj = object()\n self.assertTrue(instance.validate(obj))\n\n\nclass TestRestrictToSet(unittest.TestCase):\n def test_is_abstract_validator(self):\n self.assertIsInstance(\n xso.RestrictToSet([]),\n xso.AbstractValidator)\n\n def 
test_validate(self):\n t = xso.RestrictToSet({\"foo\", \"bar\"})\n self.assertTrue(t.validate(\"foo\"))\n self.assertTrue(t.validate(\"bar\"))\n self.assertFalse(t.validate(\"baz\"))\n\n\nclass TestNmtoken(unittest.TestCase):\n def test_is_abstract_validator(self):\n self.assertIsInstance(\n xso.RestrictToSet([]),\n xso.AbstractValidator)\n\n def _test_samples(self, t, samples, group):\n for sample in samples:\n self.assertTrue(\n t.validate(sample),\n \"\\\\u{:04x} is supposed to be in {}\".format(ord(sample), group)\n )\n\n def test_validate(self):\n t = xso.Nmtoken()\n # ok, testing this sucks hard. we’ll do some hand-waiving tests\n # guarding against the most important characters which must not occur.\n\n self.assertTrue(t.validate(\"foobar\"))\n self.assertTrue(t.validate(\"foo:bar\"))\n self.assertTrue(t.validate(\"foo-bar\"))\n self.assertTrue(t.validate(\"foo.bar\"))\n self.assertTrue(t.validate(\"foo_bar\"))\n self.assertTrue(t.validate(\".\"))\n self.assertTrue(t.validate(\":\"))\n self.assertTrue(t.validate(\"_\"))\n self.assertTrue(t.validate(\".\"))\n\n self.assertFalse(t.validate(\"\\uf901\"))\n self.assertFalse(t.validate(\"\\ufffd\"))\n self.assertFalse(t.validate(\"\\u20dd\"))\n self.assertFalse(t.validate(\">\"))\n self.assertFalse(t.validate(\"<\"))\n self.assertFalse(t.validate(\"&\"))\n\n def test_validate_base_char(self):\n self._test_samples(\n xso.Nmtoken(),\n \"\\u0041\"\n \"\\u0061\"\n \"\\u00c0\"\n \"\\u00d8\"\n \"\\u00F8\"\n \"\\u0100\"\n \"\\u0134\"\n \"\\u0141\"\n \"\\u014A\"\n \"\\u0180\"\n \"\\u01CD\"\n \"\\u01F4\"\n \"\\u01FA\"\n \"\\u0250\"\n \"\\u02BB\"\n \"\\u0386\"\n \"\\u0388\"\n \"\\u038C\"\n \"\\u038E\"\n \"\\u03A3\"\n \"\\u03D0\"\n \"\\u03DA\"\n \"\\u03DC\"\n \"\\u03DE\"\n \"\\u03E0\"\n \"\\u03E2\"\n \"\\u0401\"\n \"\\u040E\"\n \"\\u0451\"\n \"\\u045E\"\n \"\\u0490\"\n \"\\u04C7\"\n \"\\u04CB\"\n \"\\u04D0\"\n \"\\u04EE\"\n \"\\u04F8\"\n \"\\u0531\"\n \"\\u0559\"\n \"\\u0561\"\n \"\\u05D0\"\n \"\\u05F0\"\n \"\\u0621\"\n \"\\u0641\"\n \"\\u0671\"\n \"\\u06BA\"\n \"\\u06C0\"\n \"\\u06D0\"\n \"\\u06D5\"\n \"\\u06E5\"\n \"\\u0905\"\n \"\\u093D\"\n \"\\u0958\"\n \"\\u0985\"\n \"\\u098F\"\n \"\\u0993\"\n \"\\u09AA\"\n \"\\u09B2\"\n \"\\u09B6\"\n \"\\u09DC\"\n \"\\u09DF\"\n \"\\u09F0\"\n \"\\u0A05\"\n \"\\u0A0F\"\n \"\\u0A13\"\n \"\\u0A2A\"\n \"\\u0A32\"\n \"\\u0A35\"\n \"\\u0A38\"\n \"\\u0A59\"\n \"\\u0A5E\"\n \"\\u0A72\"\n \"\\u0A85\"\n \"\\u0A8D\"\n \"\\u0A8F\"\n \"\\u0A93\"\n \"\\u0AAA\"\n \"\\u0AB2\"\n \"\\u0AB5\"\n \"\\u0ABD\"\n \"\\u0AE0\"\n \"\\u0B05\"\n \"\\u0B0F\"\n \"\\u0B13\"\n \"\\u0B2A\"\n \"\\u0B32\"\n \"\\u0B36\"\n \"\\u0B3D\"\n \"\\u0B5C\"\n \"\\u0B5F\"\n \"\\u0B85\"\n \"\\u0B8E\"\n \"\\u0B92\"\n \"\\u0B99\"\n \"\\u0B9C\"\n \"\\u0B9E\"\n \"\\u0BA3\"\n \"\\u0BA8\"\n \"\\u0BAE\"\n \"\\u0BB7\"\n \"\\u0C05\"\n \"\\u0C0E\"\n \"\\u0C12\"\n \"\\u0C2A\"\n \"\\u0C35\"\n \"\\u0C60\"\n \"\\u0C85\"\n \"\\u0C8E\"\n \"\\u0C92\"\n \"\\u0CAA\"\n \"\\u0CB5\"\n \"\\u0CDE\"\n \"\\u0CE0\"\n \"\\u0D05\"\n \"\\u0D0E\"\n \"\\u0D12\"\n \"\\u0D2A\"\n \"\\u0D60\"\n \"\\u0E01\"\n \"\\u0E30\"\n \"\\u0E32\"\n \"\\u0E40\"\n \"\\u0E81\"\n \"\\u0E84\"\n \"\\u0E87\"\n \"\\u0E8A\"\n \"\\u0E8D\"\n \"\\u0E94\"\n \"\\u0E99\"\n \"\\u0EA1\"\n \"\\u0EA5\"\n \"\\u0EA7\"\n \"\\u0EAA\"\n \"\\u0EAD\"\n \"\\u0EB0\"\n \"\\u0EB2\"\n \"\\u0EBD\"\n \"\\u0EC0\"\n \"\\u0F40\"\n \"\\u0F49\"\n \"\\u10A0\"\n \"\\u10D0\"\n \"\\u1100\"\n \"\\u1102\"\n \"\\u1105\"\n \"\\u1109\"\n \"\\u110B\"\n \"\\u110E\"\n \"\\u113C\"\n \"\\u113E\"\n \"\\u1140\"\n \"\\u114C\"\n \"\\u114E\"\n \"\\u1150\"\n 
\"\\u1154\"\n \"\\u1159\"\n \"\\u115F\"\n \"\\u1163\"\n \"\\u1165\"\n \"\\u1167\"\n \"\\u1169\"\n \"\\u116D\"\n \"\\u1172\"\n \"\\u1175\"\n \"\\u119E\"\n \"\\u11A8\"\n \"\\u11AB\"\n \"\\u11AE\"\n \"\\u11B7\"\n \"\\u11BA\"\n \"\\u11BC\"\n \"\\u11EB\"\n \"\\u11F0\"\n \"\\u11F9\"\n \"\\u1E00\"\n \"\\u1EA0\"\n \"\\u1F00\"\n \"\\u1F18\"\n \"\\u1F20\"\n \"\\u1F48\"\n \"\\u1F50\"\n \"\\u1F59\"\n \"\\u1F5B\"\n \"\\u1F5D\"\n \"\\u1F5F\"\n \"\\u1F80\"\n \"\\u1FB6\"\n \"\\u1FBE\"\n \"\\u1FC2\"\n \"\\u1FC6\"\n \"\\u1FD0\"\n \"\\u1FD6\"\n \"\\u1FE0\"\n \"\\u1FF2\"\n \"\\u1FF6\"\n \"\\u2126\"\n \"\\u212A\"\n \"\\u212E\" # deliberately excluded\n \"\\u2180\"\n \"\\u3041\"\n \"\\u30A1\"\n \"\\u3105\"\n \"\\uAC00\",\n \"BaseChar\"\n )\n\n def test_validate_ideographic(self):\n self._test_samples(\n xso.Nmtoken(),\n \"\\u4E00\"\n \"\\u3007\"\n \"\\u3021\",\n \"Ideographic\"\n )\n\n def test_validate_combining(self):\n self._test_samples(\n xso.Nmtoken(),\n \"\\u0300\"\n \"\\u0360\"\n \"\\u0483\"\n \"\\u0591\"\n \"\\u05A3\"\n \"\\u05BB\"\n \"\\u05BF\"\n \"\\u05C1\"\n \"\\u05C4\"\n \"\\u064B\"\n \"\\u0670\"\n \"\\u06D6\"\n \"\\u06DD\"\n \"\\u06E0\"\n \"\\u06E7\"\n \"\\u06EA\"\n \"\\u0901\"\n \"\\u093C\"\n \"\\u093E\"\n \"\\u094D\"\n \"\\u0951\"\n \"\\u0962\"\n \"\\u0981\"\n \"\\u09BC\"\n \"\\u09BE\"\n \"\\u09BF\"\n \"\\u09C0\"\n \"\\u09C7\"\n \"\\u09CB\"\n \"\\u09D7\"\n \"\\u09E2\"\n \"\\u0A02\"\n \"\\u0A3C\"\n \"\\u0A3E\"\n \"\\u0A3F\"\n \"\\u0A40\"\n \"\\u0A47\"\n \"\\u0A4B\"\n \"\\u0A70\"\n \"\\u0A81\"\n \"\\u0ABC\"\n \"\\u0ABE\"\n \"\\u0AC7\"\n \"\\u0ACB\"\n \"\\u0B01\"\n \"\\u0B3C\"\n \"\\u0B3E\"\n \"\\u0B47\"\n \"\\u0B4B\"\n \"\\u0B56\"\n \"\\u0B82\"\n \"\\u0BBE\"\n \"\\u0BC6\"\n \"\\u0BCA\"\n \"\\u0BD7\"\n \"\\u0C01\"\n \"\\u0C3E\"\n \"\\u0C46\"\n \"\\u0C4A\"\n \"\\u0C55\"\n \"\\u0C82\"\n \"\\u0CBE\"\n \"\\u0CC6\"\n \"\\u0CCA\"\n \"\\u0CD5\"\n \"\\u0D02\"\n \"\\u0D3E\"\n \"\\u0D46\"\n \"\\u0D4A\"\n \"\\u0D57\"\n \"\\u0E31\"\n \"\\u0E34\"\n \"\\u0E47\"\n \"\\u0EB1\"\n \"\\u0EB4\"\n \"\\u0EBB\"\n \"\\u0EC8\"\n \"\\u0F18\"\n \"\\u0F35\"\n \"\\u0F37\"\n \"\\u0F39\"\n \"\\u0F3E\"\n \"\\u0F3F\"\n \"\\u0F71\"\n \"\\u0F86\"\n \"\\u0F90\"\n \"\\u0F97\"\n \"\\u0F99\"\n \"\\u0FB1\"\n \"\\u0FB9\"\n \"\\u20D0\"\n \"\\u20E1\"\n \"\\u302A\"\n \"\\u3099\"\n \"\\u309A\",\n \"CombiningChar\"\n )\n\n def test_validate_digit(self):\n self._test_samples(\n xso.Nmtoken(),\n \"\\u0030\"\n \"\\u0660\"\n \"\\u06F0\"\n \"\\u0966\"\n \"\\u09E6\"\n \"\\u0A66\"\n \"\\u0AE6\"\n \"\\u0B66\"\n \"\\u0BE7\"\n \"\\u0C66\"\n \"\\u0CE6\"\n \"\\u0D66\"\n \"\\u0E50\"\n \"\\u0ED0\"\n \"\\u0F20\",\n \"Digit\"\n )\n\n def test_validate_extender(self):\n self._test_samples(\n xso.Nmtoken(),\n \"\\u00B7\"\n \"\\u02D0\"\n \"\\u02D1\"\n \"\\u0387\"\n \"\\u0640\"\n \"\\u0E46\"\n \"\\u0EC6\"\n \"\\u3005\"\n \"\\u3031\"\n \"\\u309D\"\n \"\\u30FC\",\n \"Extender\"\n )\n\n\nclass TestIsInstance(unittest.TestCase):\n def test_is_abstract_validator(self):\n self.assertTrue(issubclass(\n xso.IsInstance,\n xso.AbstractValidator\n ))\n\n def test_validate(self):\n v = xso.IsInstance((str, bytes))\n self.assertTrue(\n v.validate(\"abc\")\n )\n self.assertTrue(\n v.validate(b\"abc\")\n )\n self.assertFalse(\n v.validate(1)\n )\n\n def test_list_of_classes_is_shared(self):\n classes = []\n v = xso.IsInstance(classes)\n\n self.assertFalse(\n v.validate(\"str\")\n )\n\n classes.append(str)\n\n self.assertTrue(\n v.validate(\"str\")\n )\n\n\nclass TestNumericRange(unittest.TestCase):\n def test_is_abstract_validator(self):\n self.assertTrue(issubclass(\n 
xso.NumericRange,\n xso.AbstractValidator\n ))\n\n def test_validate_ok(self):\n v = xso.NumericRange(min_=10, max_=20)\n for i in range(10, 21):\n self.assertTrue(\n v.validate(i),\n )\n\n def test_validate_detailed_out_of_bounds(self):\n v = xso.NumericRange(min_=10, max_=20)\n for i in range(0, 10):\n self.assertFalse(\n v.validate(i),\n )\n\n def test_validate_detailed_too_small(self):\n v = xso.NumericRange(min_=10)\n for i in range(0, 10):\n self.assertFalse(\n v.validate(i),\n )\n\n def test_validate_detailed_too_large(self):\n v = xso.NumericRange(max_=-1)\n for i in range(0, 10):\n self.assertFalse(\n v.validate(i),\n )\n\n\nclass TestEnumType(unittest.TestCase):\n def test_instanciates_EnumCDataType_by_default_and_passes_kwargs(self):\n with contextlib.ExitStack() as stack:\n EnumCDataType = stack.enter_context(\n unittest.mock.patch(\"aioxmpp.xso.types.EnumCDataType\")\n )\n\n result = xso.EnumType(\n unittest.mock.sentinel.enum_class,\n foo=unittest.mock.sentinel.foo,\n bar=unittest.mock.sentinel.bar,\n )\n\n EnumCDataType.assert_called_once_with(\n unittest.mock.sentinel.enum_class,\n foo=unittest.mock.sentinel.foo,\n bar=unittest.mock.sentinel.bar,\n )\n\n self.assertEqual(result, EnumCDataType())\n\n def test_instanciates_EnumCDataType_for_AbstractCDataType(self):\n m = unittest.mock.Mock(\n spec=xso.AbstractCDataType\n )\n\n with contextlib.ExitStack() as stack:\n EnumCDataType = stack.enter_context(\n unittest.mock.patch(\"aioxmpp.xso.types.EnumCDataType\")\n )\n\n result = xso.EnumType(\n unittest.mock.sentinel.enum_class,\n m,\n foo=unittest.mock.sentinel.foo,\n bar=unittest.mock.sentinel.bar,\n )\n\n EnumCDataType.assert_called_once_with(\n unittest.mock.sentinel.enum_class,\n m,\n foo=unittest.mock.sentinel.foo,\n bar=unittest.mock.sentinel.bar,\n )\n\n self.assertEqual(result, EnumCDataType())\n\n def test_instanciates_EnumCDataType_for_AbstractElementType(self):\n m = unittest.mock.Mock(\n spec=xso.AbstractElementType\n )\n\n with contextlib.ExitStack() as stack:\n EnumElementType = stack.enter_context(\n unittest.mock.patch(\"aioxmpp.xso.types.EnumElementType\")\n )\n\n result = xso.EnumType(\n unittest.mock.sentinel.enum_class,\n m,\n foo=unittest.mock.sentinel.foo,\n bar=unittest.mock.sentinel.bar,\n )\n\n EnumElementType.assert_called_once_with(\n unittest.mock.sentinel.enum_class,\n m,\n foo=unittest.mock.sentinel.foo,\n bar=unittest.mock.sentinel.bar,\n )\n\n self.assertEqual(result, EnumElementType())\n","repo_name":"horazont/aioxmpp","sub_path":"tests/xso/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":67926,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"21"} +{"seq_id":"7961854321","text":"import numpy as np\nimport torch\nfrom torchvision import transforms\nfrom skimage.transform import resize\n\nclass MultiInputToTensor(object):\n def __init__(self,images=[\"x\"],metadata=[\"y\",\"vp\"]):\n self.images=images\n self.metadata=metadata\n self.TT=transforms.ToTensor()\n def __call__(self,sample):\n for k in self.images:\n sample[k]=(self.TT(sample[k])).float()\n for k in self.metadata:\n sample[k]=torch.tensor(sample[k]).float()\n return sample\n\nclass vp_one_hot_encoding(object):\n def __init__(self):\n self.encoding={\n 'left':np.array([1,0,0]),\n 'right':np.array([0,1,0]),\n 'top':np.array([0,0,1])\n }\n def __call__(self,sample):\n sample[\"vp\"]=self.encoding[sample[\"vp\"]]\n return sample\n\nclass MultiInputResize(object):\n def 
__init__(self,to_shape=(300,100),rgb_keys=[\"x\"],mask_keys=[]):\n self.shape=to_shape\n self.rgb_keys=rgb_keys\n self.mask_keys=mask_keys\n def __call__(self,sample):\n for k in self.rgb_keys:\n sample[k]=resize(sample[k],self.shape,preserve_range=True)\n for k in self.mask_keys:\n sample[k]=np.round(resize(sample[k],self.shape,preserve_range=True))\n return sample\n\nclass SelectInput(object):\n def __init__(self,output_keys=[]):\n self.output_keys=output_keys\n def __call__(self,sample):\n output={}\n for k in self.output_keys:\n output[k]=sample[k]\n return output","repo_name":"Abdigal1/belugas_classification","sub_path":"Preprocesamiento/Custom_Transforms.py","file_name":"Custom_Transforms.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2971925842","text":"#Reber Ferhat Uluca - 170401053\r\nimport socket\r\nimport os\r\n\r\ntry:\r\n client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\nexcept socket.error:\r\n print(\"Socket error!\")\r\n\r\nsep = os.sep\r\nclient_files = os.getcwd() + os.sep + \"client_files\"\r\nbuffer = 1024\r\n\r\nhost = input(\"Enter the host ip address: \")\r\nport = int(input(\"Enter the port number: \"))\r\n\r\ntry:\r\n client.settimeout(1)\r\n client.sendto(\"HI!\".encode(\"utf-8\"), (host, port))\r\n msg = client.recv(buffer)\r\n print(msg.decode(\"utf-8\") + \" to the server!\")\r\n client.settimeout(None)\r\nexcept:\r\n print(\"Invalid host name or port or server is not responding!\")\r\n quit()\r\n\r\ndef GET(filename):\r\n try:\r\n client.sendto((\"GET \" + filename).encode(\"utf-8\"), (host, 42))\r\n except:\r\n return 1\r\n\r\n length = client.recv(buffer).decode(\"utf-8\")\r\n\r\n if length == \"File doesn't exist!\":\r\n print(\"File doesn't exist on server!\")\r\n return 0\r\n\r\n length = int(length)\r\n f = open(client_files + sep + filename, \"wb\")\r\n packets = 0\r\n\r\n print(\"receiving data...\")\r\n while length > 0:\r\n packets += 1\r\n try:\r\n client.settimeout(3)\r\n rec = client.recv(buffer)\r\n client.sendto(str(packets).encode(\"utf-8\"), (host, 42))\r\n except socket.timeout:\r\n f.close()\r\n os.remove(client_files + sep + filename)\r\n print(\"Connection to the server has been lost!\")\r\n print(\"Couldn't get\", filename)\r\n print(\"Only\", packets-1, \"packets received\")\r\n return 1\r\n\r\n f.write(rec)\r\n length -= buffer\r\n\r\n print(filename, \"received\")\r\n f.close()\r\n return 0\r\n\r\ndef PUT(filename):\r\n\r\n if not os.path.isfile(client_files + sep + filename):\r\n print(\"File doesn't exist in client_files folder\")\r\n return 0\r\n else:\r\n try:\r\n client.sendto((\"PUT \" + filename).encode(\"utf-8\"), (host, 42))\r\n except:\r\n return 1\r\n file_size = os.path.getsize(client_files + sep + filename)\r\n client.sendto(str(file_size).encode(\"utf-8\"), (host, 42))\r\n f = open(client_files + sep + filename, \"rb\")\r\n print(\"Sending packets..\")\r\n\r\n packets = 0\r\n data = f.read(buffer)\r\n while data:\r\n try:\r\n client.settimeout(3)\r\n client.sendto(data, (host, 42))\r\n rec = int(client.recv(buffer).decode(\"utf-8\"))\r\n except socket.timeout:\r\n print(\"Connection to the server has been lost!\")\r\n print(\"Couldn't sent\", filename)\r\n print(\"Only\", packets, \"packets sent\")\r\n print(\"Couldn't sent\", (file_size - (packets * buffer)) // buffer + 1, \"packets\")\r\n return 1\r\n\r\n if rec != packets+1:\r\n continue\r\n\r\n data = f.read(buffer)\r\n packets += 1\r\n\r\n 
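A plausible way the transforms above compose into a preprocessing pipeline, assuming the classes are in scope; the key names "x", "y", "vp" follow the defaults above, and the sample shapes are hypothetical:

import numpy as np
from torchvision import transforms

pipeline = transforms.Compose([
    vp_one_hot_encoding(),                        # "left" -> array([1, 0, 0])
    MultiInputResize(to_shape=(300, 100), rgb_keys=["x"]),
    MultiInputToTensor(images=["x"], metadata=["y", "vp"]),
])

sample = {"x": np.zeros((600, 200, 3), dtype=np.uint8), "y": 1.0, "vp": "left"}
out = pipeline(sample)
# out["x"]: float tensor of shape (3, 300, 100); out["vp"]: tensor([1., 0., 0.])

Note the ordering: vp_one_hot_encoding must run before MultiInputToTensor, since torch.tensor() cannot be applied to the raw "left"/"right"/"top" strings.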
print(filename, \"sent to the server\")\r\n f.close()\r\n return 0\r\n\r\ndef LIST():\r\n try:\r\n client.sendto(\"LIST\".encode(\"utf-8\"), (host, 42))\r\n except:\r\n return 1\r\n\r\n length = int(client.recv(buffer).decode(\"utf-8\"))\r\n files = []\r\n packets = 0\r\n\r\n while length > 0:\r\n packets += 1\r\n try:\r\n client.settimeout(3)\r\n rec = client.recv(buffer)\r\n client.sendto(str(packets).encode(\"utf-8\"), (host, 42))\r\n except socket.timeout:\r\n print(\"Connection to the server has been lost!\")\r\n return 1\r\n\r\n files.append(rec.decode(\"utf-8\"))\r\n length -= 1\r\n\r\n print(\" Name\\t\\t\\t\\t\\t Size\")\r\n for file in files:\r\n sp = file.split(\":\")\r\n file_size = int(sp[1])\r\n print(\"{:<25}{}\".format(sp[0], convert_bytes(file_size)))\r\n\r\n return 0\r\n\r\ndef convert_bytes(size):\r\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\r\n if size < 1024.0:\r\n return \"%3.1f %s\" % (size, x)\r\n size /= 1024.0\r\n return size\r\n\r\ndef main():\r\n print(\"Available commands are listed below:\\n 1-)GET -file_name- \\n 2-)PUT -file_name-\\n 3-)LIST\")\r\n while True:\r\n command = input(\"-> \")\r\n com = command.split(\" \")\r\n\r\n if com[0] == \"GET\":\r\n if GET(com[1]):\r\n return\r\n client.settimeout(None)\r\n\r\n elif com[0] == \"PUT\":\r\n if PUT(com[1]):\r\n return\r\n client.settimeout(None)\r\n\r\n elif com[0] == \"LIST\":\r\n if LIST():\r\n\r\n return\r\n client.settimeout(None)\r\n\r\n else:\r\n print(\"invalid command!\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"nyucel/blm304","sub_path":"vize/170401053/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"74693675891","text":"import string\nimport sys\nimport base64\nfrom xor import *\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n try:\n with open(sys.argv[1],\"r\") as f:\n cipher = \"\".join(f.readlines())\n cipher = base64.b64decode(cipher)\n xor = XORCipher(cipher)\n result = xor.bruteforce()\n result.print()\n except FileNotFoundError:\n print(\"File Not Found.\")\n else:\n print(\"Usage: python3 {} \".format(sys.argv[0]))","repo_name":"cyberbuff/Cryptography","sub_path":"lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42959615125","text":"soma = 0 \nx = True\nlista=[]\n\nwhile x:\n z = int(input('digite números, e zero para: '))\n lista.append(z)\n if z == 0 :\n x = False\nfor e in lista:\n soma += e\n\nprint(soma)\n\n \n ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_392/ch39_2019_09_16_13_31_47_426477.py","file_name":"ch39_2019_09_16_13_31_47_426477.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28779845025","text":"import heapq\r\n\r\n\r\nclass Solution:\r\n def thirdMax(self, nums) -> int:\r\n nums = list(set(nums))\r\n hp = nums[:3]\r\n heapq.heapify(hp)\r\n for num in nums[3:]:\r\n heapq.heappush(hp, num)\r\n heapq.heappop(hp)\r\n return heapq.heappop(hp) if len(hp) >= 3 else max(hp)\r\n\r\n\r\na = Solution()\r\ninp = [1, 
2]\r\nprint(a.thirdMax(inp))\r\n","repo_name":"MinnanZhou/Leetcode","sub_path":"414.py","file_name":"414.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35787749552","text":"import pandas as pd\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\n\npath_list = Path('C:\\\\Users\\\\Admin\\\\Desktop\\\\').glob('**/*.dat')\npaths = []\n\nfor path in path_list:\n paths.append(str(path))\n \nframes = [] # hold df for each .dat file\n\ncolumn_names = ['year', 'jday', 'month', 'day', 'hour', 'min', 'dt', 'zen', \n 'dw_solar', 'dw_solar_QC', \n 'uw_solar', 'uw_solar_QC',\n 'direct_n', 'direct_n_QC',\n 'diffuse', 'diffuse_QC',\n 'dw_ir', 'dw_ir_QC',\n 'dw_casetemp', 'dw_casetemp_QC',\n 'dw_dometemp', 'dw_dometemp_QC',\n 'uw_ir', 'uw_ir_QC',\n 'uw_casetemp', 'uw_casetemp_QC',\n 'uw_dometemp', 'uw_dometemp_QC',\n 'uvb', 'uvb_QC',\n 'par', 'par_QC',\n 'netsolar', 'netsolar_QC',\n 'netir', 'netir_QC',\n 'totalnet', 'totalnet_QC',\n 'temp', 'temp_QC',\n 'rh', 'rh_QC',\n 'windspd', 'windspd_QC',\n 'winddir', 'winddir_QC',\n 'pressure', 'pressure_QC']\n\nfor path in paths:\n print(f'Start: load file {(paths.index(path)) + 1}/{len(paths)}')\n\n with open(path,'r') as f:\n df = pd.DataFrame(l.rstrip().split() for l in f)\n \n station_name = df[0][0]\n lat = df[0][1]\n lng = df[1][1]\n alt = df[2][1]\n \n s = df.shape[0]-2\n df = df.tail(s).reset_index(drop=True)\n df.columns = column_names\n \n df['station_name'] = station_name\n df['lat'] = lat\n df['lng'] = lng\n df['alt'] = alt\n \n df.drop(columns=['dw_solar', 'dw_solar_QC', \n 'uw_solar', 'uw_solar_QC',\n 'diffuse', 'diffuse_QC',\n 'dw_ir', 'dw_ir_QC',\n 'dw_casetemp', 'dw_casetemp_QC',\n 'dw_dometemp', 'dw_dometemp_QC',\n 'uw_ir', 'uw_ir_QC',\n 'uw_casetemp', 'uw_casetemp_QC',\n 'uw_dometemp', 'uw_dometemp_QC',\n 'uvb', 'uvb_QC',\n 'par', 'par_QC',\n 'netsolar', 'netsolar_QC',\n 'netir', 'netir_QC',\n 'totalnet', 'totalnet_QC',\n 'temp', 'temp_QC',\n 'rh', 'rh_QC',\n 'windspd', 'windspd_QC',\n 'winddir', 'winddir_QC',\n 'pressure', 'pressure_QC'], inplace=True)\n \n frames.append(df)\n print(f'End: load file {(paths.index(path)) + 1}/{len(paths)}')\n \nprint('Start: combine files.')\nresult = pd.concat(frames)\nresult.drop(result[result['direct_n_QC'] != 0].index, inplace=True) # data cleaning step\nresult.to_csv('surfrad_data.csv', index=False)\nprint('End: combine files. 
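The thirdMax solution above seeds the heap with nums[:3] and then push-pops; the same idea reads a little more directly as a bounded min-heap over the distinct values. A sketch:

import heapq

def third_max(nums):
    # keep a min-heap of the three largest distinct values; after one pass
    # the heap root is the third maximum, falling back to max() when there
    # are fewer than three distinct numbers
    top3 = []
    for n in set(nums):
        heapq.heappush(top3, n)
        if len(top3) > 3:
            heapq.heappop(top3)
    return top3[0] if len(top3) == 3 else max(top3)

assert third_max([1, 2]) == 2
assert third_max([3, 2, 1]) == 1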
All .dat files now stored in surfrad_data.csv')\n\ndef return_geojson(lat,lng,increment):\n \"\"\"returns geojson box around lat and lng\"\"\"\n\n geojson_geometry = { # (lng,lat)\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n lng+increment,\n lat+increment\n ],\n [\n lng+increment,\n lat-increment\n ],\n [\n lng-increment,\n lat-increment\n ],\n [\n lng-increment,\n lat+increment\n ],\n [\n lng+increment,\n lat+increment\n ]\n ]\n ]\n }\n \n return geojson_geometry\n\nstations = ['Bondville', # station names\n 'Boulder', \n 'Desert Rock', \n 'Fort Peek', \n 'Goodwin Creek', \n 'Sioux Falls']\n\nlat_lng_s = [(40.5,-88.37), # station lat/lng in order of station names\n (40.13,-105.24), \n (36.624,-116.019), \n (48.31,-105.1), \n (34.25,-89.87), \n (43.73,-96.62)]\n\ndef clouds(geojson):\n \"\"\"gets cloud data from Planet API for daterange\"\"\"\n\n geojson_geometry = geojson # takes lng/lat\n \n geometry_filter = {\n \"type\": \"GeometryFilter\",\n \"field_name\": \"geometry\",\n \"config\": geojson_geometry\n }\n \n date_range_filter = { \n \"type\": \"DateRangeFilter\",\n \"field_name\": \"acquired\",\n \"config\": {\n \"gte\": \"2019-01-01T00:00:00.000Z\", # start date of image capture\n \"lte\": \"2019-01-02T00:00:00.000Z\" # end date of image capture\n }\n }\n \n combined_filter = {\n \"type\": \"AndFilter\",\n \"config\": [geometry_filter, date_range_filter]\n }\n \n os.environ['PL_API_KEY']='' # insert planet API key\n PLANET_API_KEY = os.getenv('PL_API_KEY')\n \n search_request = {\n \"item_types\": [\"PSScene4Band\"],\n \"filter\": combined_filter\n }\n \n search_result = \\\n requests.post(\n 'https://api.planet.com/data/v1/quick-search',\n auth=HTTPBasicAuth(PLANET_API_KEY, ''),\n json=search_request)\n \n cloud_date = [(feature['properties']['cloud_cover'],feature['properties']['acquired']) for feature in search_result.json()['features']]\n \n return cloud_date\n\ninformation = [{'station': stations[i], \n 'lat': lat_lng_s[i][0], \n 'lng': lat_lng_s[i][1], \n 'geojson': return_geojson(lat_lng_s[i][0],lat_lng_s[i][1],0.03663),\n 'clouds' : clouds(return_geojson(lat_lng_s[i][0],lat_lng_s[i][1],0.03663))} for i in range(0,len(stations))]\n\n\nrows = []\nfor i in information:\n for sat in i['clouds']:\n rows.append([i['station'],sat[0],sat[1]]) # [station, clouds, time]\n \ndf = pd.DataFrame(rows, columns=['Station', 'Clouds (0-1)', 'Time'])\ndf.to_csv('clouds.csv', index=False)\nprint('NOAA Station cloud data stored in clouds.csv')\n\nold_stations = ['Bondville', # station names\n 'Table', # Boulder\n 'Desert', # Desert Rock\n 'Fort', # Fort Peek\n 'Goodwin', # Goodwin Creek\n 'Sioux'] # Sioux Falls\n\nnew_stations = ['Bondville', # station names\n 'Boulder', \n 'Desert Rock', \n 'Fort Peek', \n 'Goodwin Creek', \n 'Sioux Falls'] \n\ndf1 = pd.read_csv('surfrad_data.csv')\ndf2 = pd.read_csv('clouds.csv')\n\ndf1['clouds'] = [np.nan]*df1.shape[0] # initalize column\n\n# replace old stations names in df1 with new ones\nfor i in old_stations:\n z = list(df1.loc[df1['station_name'] == i].index)\n old_i = old_stations.index(i)\n \n for j in z:\n df1.at[j, 'station_name'] = new_stations[old_i]\n\n\n# create jday column for cloud data\ndef get_month_days(month_number):\n \"\"\"month_number = 1 in January month_number = 12 in December\"\"\"\n month_days = [31,28,31,30,31,30,31,31,30,31,30,31]\n return month_days[month_number-1]\n \ndf2['jday'] = [i for i in range(0,df2.shape[0])] # initalize column\n\nfor i in range(0,df2.shape[0]): \n month = int(df2.loc[i]['Time'][6:7])\n day = 
int(df2.loc[i]['Time'][8:10])\n \n k = 0\n for j in range(1,month):\n k += get_month_days(j)\n \n df2.at[i, 'jday'] = (day + k)\n \n# add all same day photos to surfrad\nstation_dicts = [] # list of dictionaries for each station with cloud data\n \nfor i in new_stations: \n cloud_list = [] # list of dicts, one for each jday\n\n jj2 = list(df2.loc[df2['Station'] == i].index)\n\n for j in range(df2['jday'].min(),df2['jday'].max()):\n \n jj1 = list(df2.loc[df2['jday'] == j].index)\n \n jj3 = list(set(jj1) & set(jj2))\n \n try:\n day_dict = {str(j): [df2.loc[k]['Clouds (0-1)'] for k in jj3]}\n cloud_list.append(day_dict)\n \n except:\n pass\n \n cloud_dict = {'station': i, 'clouds': cloud_list}\n\n station_dicts.append(cloud_dict)\n\nmiss = []\nfor i in range(0,df1.shape[0]):\n station_index = new_stations.index(df1.loc[i]['station_name'])\n _jday = df1.loc[i]['jday']\n \n cloud_list = station_dicts[station_index]['clouds'][_jday-df2['jday'].min()][str(_jday)] \n \n if len(cloud_list) != 0:\n df1.at[i, 'clouds'] = np.mean(cloud_list)\n \n else:\n miss.append(i)\n df1.at[i, 'clouds'] = -9999.9\n\n# deal with missing values\ndf3 = df1[df1['clouds'] != -9999.9]\nmean_c = np.mean(df3.clouds)\nfor i in miss:\n df1.at[i, 'clouds'] = mean_c\n\n# save final file\ndf1.to_csv('surfrad_data.csv', index=False)\nprint('Final file prepared. Find surfrad_data.csv')\n","repo_name":"ian-double-u/solar","sub_path":"surfrad_data_prep.py","file_name":"surfrad_data_prep.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42939900115","text":"def interseccao_valores(dict1, dict2):\n lista=[]\n lista1=[]\n lista2=[]\n for i in dict1:\n valor1=dict1[i]\n lista1.append(valor1)\n for e in dict2:\n valor2=dict2[e]\n lista2.append(valor2)\n for k in range(len(dict1)):\n for l in range(len(dict1)):\n if lista1[k]==lista2[l]:\n lista.append(lista1[k])\n \n return lista","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_356/ch81_2019_06_04_20_02_19_137215.py","file_name":"ch81_2019_06_04_20_02_19_137215.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30697104782","text":"import decimal\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom mock import patch\n\nfrom .models import Order\n\n\nclass OrderTests(TestCase):\n\n def setUp(self):\n self.order = Order(\n order_id=\"XXXXXX\",\n completed_at=timezone.now(),\n status=\"completed\",\n satoshi=23324233,\n cents=4343,\n currency_iso=\"USD\",\n custom=\"\",\n button_type=\"foo\",\n button_name=\"foo\",\n button_description=\"foo\",\n button_id=\"foo\",\n transaction_id=\"foo\",\n transaction_hash=\"foo\",\n transaction_confirmations=\"foo\"\n )\n # pylint: disable=C0301\n self.notification_data = {\n \"order\": {\n \"id\": \"5RTQNACF\",\n \"created_at\": \"2012-12-09T21:23:41-08:00\",\n \"status\": \"completed\",\n \"total_btc\": {\n \"cents\": 100000000,\n \"currency_iso\": \"BTC\"\n },\n \"total_native\": {\n \"cents\": 1253,\n \"currency_iso\": \"USD\"\n },\n \"custom\": \"order1234\",\n \"button\": {\n \"type\": \"buy_now\",\n \"name\": \"Alpaca Socks\",\n \"description\": \"The ultimate in lightweight footwear\",\n \"id\": \"5d37a3b61914d6d0ad15b5135d80c19f\"\n },\n \"transaction\": {\n \"id\": \"514f18b7a5ea3d630a00000f\",\n \"hash\": \"4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b\",\n 
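Two fixes suggested by the code above. The SURFRAD script calls os.environ and os.getenv inside clouds() but never imports os, so as written it would raise NameError; the minimal repair is one line at the top of the script:

import os   # needed by clouds(); absent from the script's import block

And interseccao_valores ("intersection of values") indexes both value lists with range(len(dict1)), which breaks when the dicts differ in size; a set intersection is shorter and robust, though note it also deduplicates, which the original does not:

def interseccao_valores(dict1, dict2):
    # values present in both dicts, regardless of the dicts' sizes
    return list(set(dict1.values()) & set(dict2.values()))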
\"confirmations\": 0\n }\n }\n }\n\n def test_satoshi_conversion(self):\n self.assertEquals(\n decimal.Decimal(\"0.23324233\"),\n self.order.total_bitcoin()\n )\n\n def test_cents_conversion(self):\n self.assertEquals(\n decimal.Decimal(\"43.43\"),\n self.order.total_native()\n )\n\n @patch(\"requests.get\")\n def test_process_handling_normal_order_data(self, GetMock):\n GetMock.return_value.json.return_value = self.notification_data\n order = Order.process(self.notification_data)\n self.assertEquals(order.order_id, self.notification_data[\"order\"][\"id\"])\n self.assertEquals(order.satoshi, 100000000)\n self.assertEquals(order.cents, 1253)\n\n @patch(\"requests.get\")\n def test_process_handling_order_data_without_description(self, GetMock):\n self.notification_data[\"order\"][\"button\"][\"description\"] = None\n GetMock.return_value.json.return_value = self.notification_data\n\n order = Order.process(self.notification_data)\n self.assertEquals(order.order_id, self.notification_data[\"order\"][\"id\"])\n self.assertIsNone(order.button_description)\n","repo_name":"eldarion/django-coinbase","sub_path":"coinbase/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"74533538931","text":"import pymysql\nfrom bs4 import BeautifulSoup\nimport codecs\nimport sys\n\ndatasource_id = str(sys.argv[1])\npw = str(sys.argv[2])\nforge_id = 66\nfileLoc = ''\n\n# Open local database connection 1\ndb1 = pymysql.connect(host=\"grid6.cs.elon.edu\",\n user=\"megan\",\n passwd=pw,\n db=\"ossmole_merged\",\n use_unicode=True,\n charset=\"utf8\")\ncursor1 = db1.cursor()\n\n\n\n# Open local database connection 2\ndb2 = pymysql.connect(host=\"grid6.cs.elon.edu\",\n user=\"megan\",\n passwd=pw,\n db=\"bitcoin\",\n use_unicode=True,\n charset=\"utf8\")\ncursor2 = db2.cursor()\n\n# Open remote database connection 3\ndb3 = pymysql.connect(host=\"flossdata.syr.edu\",\n user=\"megan\",\n passwd=pw,\n db=\"bitcoin\",\n use_unicode=True,\n charset=\"utf8\")\ncursor3 = db3.cursor()\n\n# get the list of all files to parse\ncursor1.execute('SELECT datasource_id, comments \\\n FROM datasources \\\n WHERE datasource_id >= %s \\\n AND forge_id= %s', (datasource_id, forge_id))\n \nrows = cursor1.fetchall()\nfor row in rows:\n current_ds = row[0]\n fileLoc = row[1]\n linecounter = 0;\n \n date_of_entry = fileLoc.split('/',1)[1]\n time_of_entry = ''\n unix_time = ''\n send_user = ''\n line_message = ''\n cleaned_message = ''\n linetype = 'message'\n \n print('processing', current_ds, 'at', fileLoc)\n log = codecs.open(fileLoc, 'r', encoding='utf-8', errors='ignore')\n soup = BeautifulSoup(log)\n\n# the file looks like this: \n#\n#\n#\n#09:33\n#\n#\n#cdecker\n#Well it's a project with 1'500'000 sloc\n#\n#\n\n\n for trow in soup.find_all('tr'):\n linecounter += 1\n unix_time = trow.td.a['name']\n time_of_entry = trow.td.a.next_sibling.get_text()\n send_user = trow.td.next_sibling.next_sibling.get_text()\n line_message = trow.td.next_sibling.next_sibling.next_sibling.next_sibling.get_text().rstrip()\n #print(date_of_entry,'|',linecounter,'|',unix_time,'|',time_of_entry,'|',send_user,'|',line_message,'|',linetype)\n\n #insert parsed data into databases\n try:\n cursor2.execute(u\"INSERT INTO bitcoindev_irc(datasource_id, \\\n line_num, \\\n date_of_entry, \\\n time_of_entry, \\\n unix_time, \\\n send_user, \\\n line_message, \\\n type) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\", \n (current_ds, \n 
linecounter,\n date_of_entry,\n time_of_entry,\n unix_time, \n send_user, \n line_message, \n linetype\n ))\n db2.commit() \n except pymysql.Error as error:\n print(error)\n db2.rollback()\n \n try:\n cursor3.execute(u\"INSERT INTO bitcoindev_irc(datasource_id, \\\n line_num, \\\n date_of_entry, \\\n time_of_entry, \\\n unix_time, \\\n send_user, \\\n line_message, \\\n type) \\\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\", \n (current_ds, \n linecounter,\n date_of_entry,\n time_of_entry,\n unix_time, \n send_user, \n line_message, \n linetype\n ))\n db3.commit() \n except pymysql.Error as error:\n print(error)\n db3.rollback()\n\ndb1.close()\ndb2.close()\ndb3.close() \ncursor1.close()\ncursor2.close() \ncursor3.close() \n","repo_name":"FLOSSmole/bitcoin","sub_path":"2parseBitcoinDevIRCLogs.py","file_name":"2parseBitcoinDevIRCLogs.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40934026207","text":"import os\nfrom bottle import route, run, request\nimport subprocess\nfrom ansi2html import ansi2html\n\n\n@route('/hello/:name')\ndef test(name='World'):\n return 'Hello %s!' % name\n@route('/')\ndef index():\n\t#html=subprocess.call(['python3','ti.py'])\n\n\tif os.path.exists(\"data\"):\n\t\tos.remove(\"data\")\n\tdone=os.system(\"python3 crawler.py >> data\")\n\twith open('data', 'r') as myfile:\n\t\thtml = myfile.read()\n\tif request.environ.get('HTTP_USER_AGENT').strip().startswith(\"curl\"):\n\t\treturn html\n\n\n\tstyle='''\n\t\n\t'''\n\n\n\thtml = ansi2html(html)\n\thtml2=html.split(\"\\n\")\n\thtml3=\"\"\n\tfor line in html2:\n\t\thtml3=html3+\"
\" + line + \"
\\n\" \n\thtml=html.replace(\"\\n\",\"
\")\n\treturn style+html3\n\nport = os.environ.get('PORT', 5000)\ndone=os.system(\"python3 ti.py\")\nrun(host='0.0.0.0', port=port)\n","repo_name":"sajadghorbanii/tir","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28508278760","text":"#將檔案從地端append到gcs\nfrom google.cloud import storage\nclient = storage.Client.from_service_account_json('aib2bpoc-78af3836f1bd.json')\n\n\ndef update_file(bucket_name, blob_name, new_content):\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n # Upload the new content\n blob.upload_from_string(new_content, content_type='text/plain')\n\n print(f'File {blob_name} in bucket {bucket_name} updated with new content.')\n\n# Specify the bucket name, blob (file) name, and the new content\nbucket_name = 'aib2bdata'\nblob_name = 'text.txt'\nnew_content = 'This is the updated content.'\n\nupdate_file(bucket_name, blob_name, new_content)","repo_name":"Sean00001/elk-gcs-api","sub_path":"gcs-update-file.py","file_name":"gcs-update-file.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1660550846","text":"import sys\ninput = sys.stdin.readline\ndef sol():\n N = int(input())\n eggs = [list(map(int, input().split())) for _ in range(N)]\n crashed = [False for _ in range(N)]\n max_cnt = 0\n def DFS(i, cnt):\n # 가장 최근에 든 계란이 가장 오른쪽에 위치한 계란일 경우 종료한다.\n if i > N - 1:\n nonlocal max_cnt\n max_cnt = max(max_cnt, cnt)\n return\n # 이미 다 깰 수 있는 경우의 수가 있따면 종료한다.\n if max_cnt == N: return\n # 남은 계란수의 2배와 지금까지 깬 수의 합이 최댓값보다 작으면 종료한다.\n if (N - i) * 2 + cnt < max_cnt:\n return\n # 들고 있는 계란이 깨졌거나 깨지지않은 다른 계란이 없으면 넘어간다.\n if crashed[i] == True or crashed.count(False) == 1:\n DFS(i + 1, cnt)\n else:\n for j in range(N):\n if i == j: continue\n if crashed[j] == False:\n temp = 0\n # 부딪혀 보고\n eggs[i][0] -= eggs[j][1]\n eggs[j][0] -= eggs[i][1]\n # 깨지면 깨졌다고 한다\n if eggs[i][0] <= 0: temp += 1; crashed[i] = True\n if eggs[j][0] <= 0: temp += 1; crashed[j] = True\n DFS(i + 1, cnt + temp)\n # 다음을 위해 복구한다\n if eggs[i][0] <= 0: crashed[i] = False\n if eggs[j][0] <= 0: crashed[j] = False\n eggs[i][0] += eggs[j][1]\n eggs[j][0] += eggs[i][1]\n DFS(0, 0)\n return max_cnt\nprint(sol())","repo_name":"mintropy/algorithm_pulzo","sub_path":"지현배/2108/0817/16987.py","file_name":"16987.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"22903760602","text":"# Adamın çözdüğü\n# Çözüm sayısı 0 | Hedef 5 çözüm\nclass Solution(object):\n def mergeAlternately(self, word1, word2):\n res = []\n i, j = 0, 0\n \n while len(word1) > i or len(word2) > j:\n if len(word1) > i:\n res.append(word1[i])\n i += 1\n if len(word2) > j:\n res.append(word2[j])\n j += 1\n \n return ''.join(res)\n\n\nprint(Solution().mergeAlternately('ab','pqrs'))\n\n\n# Benim çözdüğüm\n# O(n^2) Time Complexity çünkü res += string oluyor burada stringi yeniden tanımlayıp kendi içinde loopa sokuyor\nclass Solution2(object):\n def mergeAlternately(self, word1, word2):\n res = ''\n i, j = 0, 0\n\n while len(word1) > i and len(word2) > j:\n res += word1[i] \n res += word2[j]\n i += 1\n j += 1\n\n if i == len(word1):\n res += word2[j:]\n elif j == len(word2):\n res += word1[i:]\n\n return res \n\n\n\n \n","repo_name":"merthamit/Over-300-leetcode-solutions","sub_path":"leetcodes 
questions/1768.py","file_name":"1768.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12896290884","text":"class Solution:\n\n def __init__(self, head: Optional[ListNode]):\n ########### Approach 1 O(n) #######################\n # Find the length of the linked list, n, here to get the upper bound\n\n ########### Approach 2: O(n) #######################\n # Use a hashmap to traverse the linked list once, mapping indexes to each node\n # e.g.\n # 0 -> head\n # 1 -> head.next\n # ...\n # Edge cases: empty list\n self.head = [head][0]\n self.lookup = {}\n self.n = 0\n while head:\n self.lookup[self.n] = [head][0]\n head = head.next\n self.n += 1\n\n def getRandom(self) -> int:\n ########### Approach 1: O(n) #######################\n # How to generate a random number?\n # choose a random number, called i, between 0 and n - 1 with random.randint\n # and traverse the linekd list i times and return the value\n\n ########### Approach 2: O(1) #######################\n # pick a random number and return the node mapped by the lookup\n # e.g.\n # return lookup.get(random.randint(0, self.n-1))\n return self.lookup[random.randint(0, self.n-1)].val\n \n\n\n# Your Solution object will be instantiated and called as such:\n# obj = Solution(head)\n# param_1 = obj.getRandom()","repo_name":"peter-lucia/leetcode","sub_path":"problems/linked_list_random_node/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18405873479","text":"import itertools; import math; import operator; import random; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from heapq import *; import unittest; from typing import List;\ndef get_sol(): return Solution()\n\nclass Solution:\n def circularArrayLoop(self, nums: List[int]) -> bool:\n n=len(nums)\n for i in range(n):\n vis = set()\n if nums[i]>0:\n while True:\n if nums[i]<0: break\n if abs(nums[i]%n)==0: break # case: -1,-2,-3,-4,-5\n if i in vis and len(vis)>1: return True\n if i in vis and len(vis)==1: break\n vis.add(i)\n i= (i + nums[i])%n\n if not 0<=i0: break\n if abs(nums[i]%n)==0: break # case: -1,-2,-3,-4,-5\n if i in vis and len(vis)>1: return True\n if i in vis and len(vis)==1: break\n vis.add(i)\n i= (i + nums[i])%n\n if not 0<=i', self._on_canvas_resize)\n\n self._root_window.rowconfigure(0, weight = 1)\n self._root_window.columnconfigure(0, weight = 1)\n\n\n def start(self):\n self._root_window.mainloop()\n\n\n # Because of the call we made to bind() in the __init__ method,\n # this method is called whenever the size of the canvas changes.\n # We respond by calling our own _redraw() method to redraw the\n # image, given the new size of the canvas.\n def _on_canvas_resize(self, event):\n self._redraw()\n\n\n def _redraw(self):\n # Remove all of the shapes currently in the canvas. (For a fun\n # effect, comment this line out and re-run the program. Why does\n # it behave differently?)\n self._canvas.delete(tkinter.ALL)\n\n # Find out how big the canvas is, in terms of pixels, now.\n width = self._canvas.winfo_width()\n height = self._canvas.winfo_height()\n # Draw the rings. 
We always want the size of the rings to be\n # in the same proportions as the size of the canvas, so we're\n # passing \"fractional coordinates\" instead of \"absolute coordinates\".\n # When we actually draw ovals on the canvas, we'll convert the\n # fractional coordinates (ranging from 0.0 to 1.0 in the x and y\n # directions) to absolute coordinates (in terms of pixels, with\n # the range changing as the size of the canvas changes).\n self._draw_ring(width, height, .05, .05, .32, .32)\n self._draw_ring(width, height, .32, .32, .64, .64)\n self._draw_ring(width, height, .69, .69, .96, .96)\n self._draw_ring(width, height, .19, .19, .46, .46)\n self._draw_ring(width, height, .51, .51, .78, .78)\n\n\n def _draw_ring(self, width, height, tl_fracx, tl_fracy, br_fracx, br_fracy):\n # Given the width and height of the canvas, along with fractional\n # coordinates representing the top-left and bottom-right points of\n # the bounding box around the oval we want to draw, draw the\n # corresponding oval. We have to convert the coordinates from\n # fractional to absolute in order to draw the oval, since Canvas'\n # create_oval() method expects absolute (pixel) coordinates. We\n # can do that by multiplying the fractional coordinate by the\n # width or height, respectively.\n self._canvas.create_oval(\n width * tl_fracx, height * tl_fracy,\n width * br_fracx, height * br_fracy,\n outline='black')\n\n\n\nif __name__ == '__main__':\n app = OlympicsRingsApplication()\n app.start()\n","repo_name":"409230250/Programming-with-Software-Libraries-on-Python","sub_path":"Notes and Examples From Lecture/Fed 21 l 26 olympic_rings.py","file_name":"Fed 21 l 26 olympic_rings.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"5866449149","text":"#!/usr/bin/env python3\n\nimport json\nfrom urllib.request import urlopen, Request\nimport time\nimport sys\nimport logging\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\n\nuser = 'fgalas525@gmail.com'\ntask = 'any2txt|wcrft2|liner2({\"model\":\"n82\"})'\nurl = 'http://ws.clarin-pl.eu/nlprest2/base'\nin_file = 'data.zip'\nout_file = 'out.zip'\n\n\ndef upload(file):\n logging.debug('Uploading file: {}'.format(file))\n resp = urlopen(Request('{}/upload/'.format(url), data=open(file, 'rb'),\n headers={'Content-Type': 'binary/octet-stream'})).read().decode('ascii')\n logging.debug('Response: {}'.format(resp))\n return resp\n\n\ndef process(data):\n doc = json.dumps(data)\n req = Request('{}/startTask/'.format(url), data=doc.encode('ascii'), headers={'Content-Type': 'application/json'})\n logging.debug('Requesting: url=\"{}\", data=\"{}\"'.format(req.full_url, req.data))\n taskid = urlopen(req).read().decode('ascii')\n logging.debug('Response: {}'.format(taskid))\n time.sleep(0.2)\n req = Request('{}/getStatus/{}'.format(url, taskid))\n logging.debug('Requesting: url=\"{}\", data=\"{}\"'.format(req.full_url, req.data))\n resp = urlopen(req).read().decode('ascii')\n logging.debug('Response: {}'.format(resp))\n data = json.loads(resp)\n while data[\"status\"] == \"QUEUE\" or data[\"status\"] == \"PROCESSING\":\n time.sleep(0.5)\n req = Request('{}/getStatus/{}'.format(url, taskid))\n logging.debug('Requesting: url=\"{}\", data=\"{}\"'.format(req.full_url, req.data))\n resp = urlopen(req).read().decode('ascii')\n logging.debug('Response: {}'.format(resp))\n data = json.loads(resp)\n if data[\"status\"] == \"ERROR\":\n logging.error(\"Error 
{}\".format(data['value']))\n return None\n return data[\"value\"]\n\n\ndef main():\n fileid = upload(in_file)\n data = process({'lpmn': 'filezip({fileid})|{task}|dir|makezip'.format(fileid=fileid, task=task), 'user': user})\n if data is not None:\n data = data[0][\"fileID\"]\n req = Request('{}/download{}'.format(url, data))\n logging.debug('Requesting: url=\"{}\", data=\"{}\"'.format(req.full_url, req.data))\n content = urlopen(req).read()\n logging.debug('Response: {}'.format(content))\n with open(out_file, \"wb\") as outfile:\n outfile.write(content)\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"zalon525/pjn","sub_path":"8-ner/process_package.py","file_name":"process_package.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"146309499","text":"from collections import Counter \n \ndef most_frequent(List): \n occurence_count = Counter(List) \n return occurence_count.most_common(1)[0] \n \npossibilities = []\n\na, b = [int(i) for i in input().split()]\nfor i in range(1, a+1):\n for j in range(1, b+1):\n possibilities.append(i+j)\n\n\nprint(most_frequent(possibilities)) \n","repo_name":"Kingston802/kattis-solutions","sub_path":"dicecup.py","file_name":"dicecup.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7995535732","text":"import json\nfrom pathlib import Path\nfrom statistics import mean\n\nfrom PIL import Image\n\nfrom .rotate import pil_load_with_jpeg_exif_rot_applied\n\n# hide import inside function to prevent PlomClient depending on it\n# from zxingcpp import read_barcodes, BarcodeFormat\n\n\ndef findCorner(qr, dim):\n \"\"\"Determines the x-y coordinates and relative location of the given QR code's approximate centre.\n\n Args:\n qr (zxingcpp.Result): object containing the information stored in the QR code\n dim (tuple): pair of ints that correspond to the dimensions of\n the image that contains the QR code.\n\n Return:\n tuple: a triple ``(str, mx, my)`` where ``str`` is a 2-char string, one of\n \"NE\", \"NE\", \"SW\", \"SE\", depending on the relative location of the QR code,\n or \"??\" if the QR code cannot be detected. ``mx, my`` are either ints that correspond\n to the (x, y) coordinates of the QR code's centre location in the image, or None\n if the QR code is not detected and there are no coordinates to return.\n \"\"\"\n qr_polygon = [\n qr.position.top_left,\n qr.position.top_right,\n qr.position.bottom_left,\n qr.position.bottom_right,\n ]\n mx = mean([p.x for p in qr_polygon])\n my = mean([p.y for p in qr_polygon])\n width, height = dim\n\n NS = \"?\"\n EW = \"?\"\n if my < 0.4 * height:\n NS = \"N\"\n elif my > 0.6 * height:\n NS = \"S\"\n else:\n return \"??\", None, None\n if mx < 0.4 * width:\n EW = \"W\"\n elif mx > 0.6 * width:\n EW = \"E\"\n else:\n return \"??\", None, None\n return NS + EW, mx, my\n\n\ndef QRextract(image, *, try_harder=True, rotation=0):\n \"\"\"Decode and return QR codes in an image.\n\n Args:\n image (str/pathlib.Path/PIL.Image): an image filename, either in\n the local dir or specified e.g., using `pathlib.Path`. Can\n also be an instance of Pillow's `Image`.\n\n Keyword Args:\n try_harder (bool): Try to find QRs on a smaller resolution.\n Defaults to True. 
Sometimes this seems work around high\n failure rates in the synthetic images used in CI testing.\n Details below.\n rotation (int): Rotate the image by 90, -90, 180 or 270 degrees\n counterclockwise prior to reading the QR codes. Defaults to 0.\n\n Returns:\n dict/None: Keys \"NW\", \"NE\", \"SW\", \"SE\", each with a dict containing\n a 'tpv_signature', 'x', 'y' keys that correspond to strings extracted from\n QR codes (one string per code) and the x-y coordinates of the QR code.\n The dict is empty if no QR codes found in that corner.\n\n Without the `try_harder` flag, we observe high failure rates when\n the vertical resolution is near 2000 pixels (our current default).\n This is Issue #967 [1]. It is not prevalent in real-life images,\n but causes a roughly 5%-10% failure rate in our synthetic CI runs.\n The workaround (on by default) uses Pillow's `.reduce()` to quickly\n downscale the image. This does increase the run time (have not\n checked by how much: I assume between 25% and 50%) so if that is\n more of a concern than error rate, turn off this flag.\n\n [1] https://gitlab.com/plom/plom/-/issues/967\n \"\"\"\n # hide import inside function to prevent PlomClient depending on it\n from zxingcpp import read_barcodes, BarcodeFormat\n\n cornerQR = {\"NW\": {}, \"NE\": {}, \"SW\": {}, \"SE\": {}}\n\n if not isinstance(image, Image.Image):\n image = pil_load_with_jpeg_exif_rot_applied(image)\n\n if rotation != 0:\n assert rotation in (-90, 90, 270, 180)\n image = image.rotate(rotation, expand=True)\n\n # PIL does lazy loading. Force loading now so we see errors now.\n # Otherwise, zxing-cpp might hide error messages, Issue #2597\n image.load()\n\n try:\n micro = BarcodeFormat.MicroQRCode\n except AttributeError:\n # workaround github.com/zxing-cpp/zxing-cpp/issues/512\n micro = BarcodeFormat.MircoQRCode\n\n qrlist = read_barcodes(image, formats=(BarcodeFormat.QRCode | micro))\n for qr in qrlist:\n cnr, x_coord, y_coord = findCorner(qr, image.size)\n if cnr in cornerQR.keys():\n cornerQR[cnr].update({\"tpv_signature\": qr.text, \"x\": x_coord, \"y\": y_coord})\n\n if try_harder:\n # Try again on smaller image: originally for pyzbar (Issue #967), but I\n # think I've seen this find a QR-code missed by the above since\n # switching to ZXing-cpp (Issue #2520), so we'll leave it.\n try:\n image = image.reduce(2)\n except ValueError:\n # mode-P (paletted pngs) fail to reduce, Issue #2631\n qrlist = []\n else:\n qrlist = read_barcodes(image, formats=(BarcodeFormat.QRCode | micro))\n for qr in qrlist:\n cnr, x_coord, y_coord = findCorner(qr, image.size)\n if cnr in cornerQR.keys():\n s = qr.text\n prev_tpv_signature = cornerQR[cnr].get(\"tpv_signature\")\n if not prev_tpv_signature:\n # TODO: log these failures?\n # print(\n # f'Found QR-code \"{s}\" at {cnr} on reduced image, '\n # \"not found at original size\"\n # )\n cornerQR[cnr].update(\n {\"tpv_signature\": s, \"x\": x_coord, \"y\": y_coord}\n )\n elif s == prev_tpv_signature:\n # no-op, we already read this at the previous resolution\n pass\n else:\n # TODO: found a different QR code at lower resolution!\n # For now, just ignore and keep the previous hires result\n pass\n\n return cornerQR\n\n\ndef QRextract_legacy(image, write_to_file=True, try_harder=True):\n \"\"\"Decode QR codes in an image, return or save them in .qr file.\n\n Args:\n image (str/pathlib.Path/PIL.Image): an image filename, either in\n the local dir or specified e.g., using `pathlib.Path`. 
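Plain filenames are opened with any JPEG EXIF rotation applied first.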
Can\n also be an instance of Pillow's `Image`.\n write_to_file (bool): by default, the results are written into\n a file named `image.qr` (i.e., the same as input name\n with `.qr` appended, so something like `foo.jpg.qr`).\n If this `.qr` file already exists and is non-empty, then no\n action is taken, and None is returned.\n try_harder (bool): Try to find QRs on a smaller resolution.\n Defaults to True. Sometimes this seems work around high\n failure rates in the synthetic images used in CI testing.\n Details blow.\n\n Returns:\n dict/None: Keys \"NW\", \"NE\", \"SW\", \"SE\", each with a list of the\n strings extracted from QR codes, one string per code. The\n list is empty if no QR codes found in that corner.\n\n Without the `try_harder` flag, we observe high failure rates when\n the vertical resolution is near 2000 pixels (our current default).\n This is Issue #967 [1]. It is not prevalent in real-life images,\n but causes a roughly 5%-10% failure rate in our synthetic CI runs.\n The workaround (on by default) uses Pillow's `.reduce()` to quickly\n downscale the image. This does increase the run time (have not\n checked by how much: I assume between 25% and 50%) so if that is\n more of a concern than error rate, turn off this flag.\n\n [1] https://gitlab.com/plom/plom/-/issues/967\n \"\"\"\n # hide import inside function to prevent PlomClient depending on it\n from zxingcpp import read_barcodes, BarcodeFormat\n\n if write_to_file:\n image = Path(image)\n # foo.jpg to foo.jpg.qr\n qrfile = image.with_suffix(\"{}.qr\".format(image.suffix))\n if qrfile.exists() and qrfile.stat().st_size > 0:\n return None\n\n cornerQR = {\"NW\": [], \"NE\": [], \"SW\": [], \"SE\": []}\n\n if not isinstance(image, Image.Image):\n image = pil_load_with_jpeg_exif_rot_applied(image)\n\n # PIL does lazy loading. 
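Image data is not decoded until the pixels are first needed.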
Force loading now so we see errors now.\n # Otherwise, zxing-cpp might hide error messages, Issue #2597\n image.load()\n\n try:\n micro = BarcodeFormat.MicroQRCode\n except AttributeError:\n # workaround github.com/zxing-cpp/zxing-cpp/issues/512\n micro = BarcodeFormat.MircoQRCode\n\n qrlist = read_barcodes(image, formats=(BarcodeFormat.QRCode | micro))\n for qr in qrlist:\n cnr = findCorner(qr, image.size)[0]\n if cnr in cornerQR.keys():\n cornerQR[cnr].append(qr.text)\n\n if try_harder:\n # try again on smaller image: avoids random CI failures #967?\n image = image.reduce(2)\n qrlist = read_barcodes(image, formats=(BarcodeFormat.QRCode | micro))\n for qr in qrlist:\n cnr = findCorner(qr, image.size)[0]\n if cnr in cornerQR.keys():\n s = qr.text\n if s not in cornerQR[cnr]:\n # TODO: log these failures?\n # print(\n # f'Found QR-code \"{s}\" at {cnr} on reduced image, '\n # \"not found at original size\"\n # )\n cornerQR[cnr].append(s)\n\n if write_to_file:\n with open(qrfile, \"w\") as fh:\n json.dump(cornerQR, fh)\n\n return cornerQR\n","repo_name":"plomgrading/plom","sub_path":"plom/scan/fasterQRExtract.py","file_name":"fasterQRExtract.py","file_ext":"py","file_size_in_byte":9404,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"72065642934","text":"from ulab import numpy as np\nimport urandom\n\ndef solve_gen_eig_prob(A, B, eps=1e-5):\n \"\"\"\n Solves the generalised eigenvalue problem of the form:\n Aw = \\lambda*Bw\n\n Note: can be validated against `scipy.linalg.eig(A, b=B)`\n\n Ref:\n 'Eigenvalue and Generalized Eigenvalue Problems: Tutorial (2019)'\n Benyamin Ghojogh and Fakhri Karray and Mark Crowley\n arXiv 1903.11240\n\n \"\"\"\n Lam_b, Phi_b = np.linalg.eig(B) # eig decomp of B alone\n Lam_b = np.eye(len(Lam_b)) * Lam_b # convert to diagonal matrix of eig vals\n\n Lam_b_sq = replace_nan(Lam_b ** 0.5) + np.eye(len(Lam_b)) * eps\n Phi_b_hat = np.dot(Phi_b, np.linalg.inv(Lam_b_sq))\n A_hat = np.dot(np.dot(Phi_b_hat.transpose(), A), Phi_b_hat)\n\n try:\n Lam_a, Phi_a = np.linalg.eig(A_hat)\n except ValueError:\n # if `ulab` raises a \"input matrix asymmetric\" error in analytical approach, \n # we have to estimate eigen-pair iteratively\n Lam_a, Phi_a = solve_eig_qr(A_hat)\n\n Lam_a = np.eye(len(Lam_a)) * Lam_a\n\n Lam = Lam_a\n Phi = np.dot(Phi_b_hat, Phi_a)\n\n return np.diag(Lam), Phi\n\n\ndef solve_eig_qr(A, iterations=30):\n\n \"\"\"\n Use the QR iteration algorithm to iteratively solve for the eigenvectors and eigenvalues\n of a matrix A. Note: only guaranteed to recover exactly for symmetric matrices\n with real eigenvalues. May work partially for asymmetric matrices (no complex support yet).\n\n Returns:\n `lam`: vector of eigenvalues\n `Q_bar`: matrix of eigenvectors (columns)\n \"\"\"\n\n Ak = A\n Q_bar = np.eye(len(Ak))\n\n for _ in range(iterations):\n Qk, Rk = np.linalg.qr(Ak)\n Ak = np.dot(Rk, Qk)\n Q_bar = np.dot(Q_bar, Qk)\n\n lam = np.diag(Ak)\n return lam, Q_bar\n\n\ndef power_iteration(A, iterations):\n \"\"\"\n Iterative algo. 
to find the eigenvector of a matrix A corresponding to the largest\n eigenvalue.\n\n TODO: Establish some measure or heuristic of min number of iterations required\n \"\"\"\n # choose random initial vector to reduce risk of choosing one orthogonal to\n # target eigen vector\n b_k = np.array([urandom.random() for i in range(len(A))])\n\n for _ in range(iterations):\n b_k1 = np.dot(A, b_k)\n b_k1_norm = np.linalg.norm(b_k1)\n # re normalize the vector\n b_k = b_k1 / b_k1_norm\n\n return b_k1_norm, b_k\n\n\ndef max_eig(A, iterations, numeric_method=\"qr\"):\n \"\"\"\n Function to return the largest eigenvalue of a matrix and its corresponding eigenvector.\n\n A must be square but need not be symmetric. Tries to first use uLab `np.linalg.eig`\n that is better optimised but requires a symmetric matrix. Failing this, power iteration\n algorithm is used.\n \"\"\"\n try:\n lam, V = np.linalg.eig(A)\n v = V[:, np.argmax(lam)]\n except ValueError:\n if numeric_method == \"power_iteration\":\n lam, v = power_iteration(A, iterations)\n else:\n if numeric_method != \"qr\":\n print(\"Unknown `numeric_method` arg: defaulting to QR solver\")\n lam, v = solve_eig_qr(A, iterations)\n lam = lam[0] # only need first eigen val (largest returned first)\n v = v[:, 0] # only first eig vector\n\n return lam, v\n\n\ndef resample(X, factor):\n \"\"\"\n Perform downsampling of signal `X` by an integer `factor`.\n \"\"\"\n idx_rs = np.arange(0, len(X) - 1, factor)\n return X[idx_rs]\n\n\ndef standardise(X):\n axis = np.argmax(X.shape)\n minor_shape = np.min(X.shape)\n mu = np.mean(X, axis=axis).reshape((minor_shape, 1))\n sigma = np.std(X, axis=axis).reshape((minor_shape, 1))\n return (X - mu) / sigma\n\n\ndef cov(X, Y, biased=False):\n assert (\n X.shape == Y.shape and len(X.shape) == 1\n ), \"Expected data vectors of equal length\"\n assert len(X) > 1, \"At least 2 data points are required\"\n\n X = X - np.mean(X)\n Y = Y - np.mean(Y)\n denom = len(X) if biased else len(X) - 1\n\n return (np.sum(X * Y)) / denom\n\n\ndef corr(X, Y):\n assert (\n X.shape == Y.shape and len(X.shape) == 1\n ), \"Expected data vectors of equal length\"\n assert len(X) > 1, \"At least 2 data points are required\"\n\n return cov(X, Y, biased=True) / (np.std(X) * np.std(Y))\n\n\ndef replace_nan(A, rep=0):\n return np.where(np.isfinite(A), A, rep)\n\ndef col_concat(*mats):\n \"\"\"\"\n Concatenate a variable number of matrices along their \n column axis (axis=1 using `numpy` convention).\n \"\"\"\n cols = sum([mat.shape[1] for mat in mats])\n rows = mats[0].shape[0]\n out = np.zeros((rows, cols))\n j = 0\n for mat in mats:\n mat_cols = mat.shape[1]\n out[:, j:j+mat_cols] = mat\n j += mat_cols\n \n return out\n\ndef zeros_like(A):\n return np.zeros(A.shape)\n\ndef block_diag(X, Y, reverse=False):\n if not reverse:\n X = np.concatenate((X, zeros_like(X)), axis=1)\n Y = np.concatenate((zeros_like(Y), Y), axis=1)\n else:\n X = np.concatenate((zeros_like(X), X), axis=1)\n Y = np.concatenate((Y, zeros_like(Y)), axis=1)\n return np.concatenate((X, Y), axis=0)\n\ndef sign(x):\n \"\"\"\n Return the sign of a numerical variable.\n \"\"\"\n x+1 # arb operation to raise an error if non-numeric arg given.\n return 1 if x >=0 else -1","repo_name":"JamesTev/EEG-decoding","sub_path":"micropython/lib/computation.py","file_name":"computation.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"34658097671","text":"import socket\nimport tqdm\nimport os\nfrom 
Crypto.PublicKey import RSA\nfrom Crypto import Random\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((socket.gethostname(), 8090))\ns.listen(5)\n\nSEPARATOR = \"\"\n\nwhile True:\n clientsocket, address = s.accept()\n print(f'Koneksi dengan {address} berhasil!')\n # clientsocket.send(bytes('Selamat datang di server kami!', 'utf-8'))\n received = clientsocket.recv(4096).decode()\n filename, filesize = received.split(SEPARATOR)\n filename = os.path.basename(filename)\n filesize = int(filesize)\n\n # start receiving the file from the socket\n # and writing to the file stream\n progress = tqdm.tqdm(range(filesize), f\"Receiving {filename}\", unit=\"B\", unit_scale=True, unit_divisor=1024)\n with open('hasil'+filename, \"wb\") as f:\n while True:\n # read 1024 bytes from the socket (receive)\n bytes_read = clientsocket.recv(4096)\n if not bytes_read: \n # nothing is received\n # file transmitting is done\n break\n # write to the file the bytes we just received\n f.write(bytes_read)\n # update the progress bar\n progress.update(len(bytes_read))\n\n # close the client socket\n clientsocket.close()\n # close the server socket\n s.close()","repo_name":"dzakw/client-sock","sub_path":"server_sock_python.py","file_name":"server_sock_python.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15734889083","text":"from django import forms\nfrom .models import Jogador\nfrom .models import Jogo\nfrom .models import Equipa\nfrom .models import Marcacao\nfrom .models import Epoca\nfrom .models import Convocatoria\nfrom .models import ResultadoJogo\nfrom .models import Pontuacao\nfrom .models import AcaoDisciplinar\nfrom .models import TipoAcao\nfrom .models import TipoPontuacao\nfrom .models import Substituicao\nfrom .models import FaixaEtaria\nfrom .models import Modalidade\nfrom .models import Campeonato\n\n\n\n\n#JOGADOR\nclass JogadorForm(forms.ModelForm):\n\n class Meta:\n model = Jogador\n fields = ('nome','data_nasc','nif','telefone', 'email', 'morada')\n labels = {\n 'nome':'Nome Completo',\n 'data_nasc':'Data Nascimento',\n 'nif':'Nº de Contribuinte',\n 'telefone':'Telefone',\n 'email':'Email',\n 'morada':'Morada'\n }\n\n#EQUPA\nclass EquipaForm(forms.ModelForm):\n\n class Meta:\n model = Equipa\n fields = ('nome_equipa','fundacao','origem','telefone', 'email', 'descricao_equipa', 'treinador', 'modalidade', 'faixa_etaria')\n labels = {\n 'nome':'Nome Equipa',\n 'fundacao':'Ano de Fundação',\n 'origem':'Origem da Equipa',\n 'telefone':'Telefone',\n 'email':'Email',\n 'descricao_equipa':'Descrição da Equipa',\n 'treinador':'Treinador',\n 'modalidade':'Modalidade',\n 'faixa_etaria':'Faixa Etária'\n }\n\n#JOGO\nclass JogoForm(forms.ModelForm):\n\n class Meta:\n model = Jogo\n fields = ('dia', 'hora', 'localizacao')\n labels = {\n 'dia':'Dia do Jogo',\n 'hora':'Hora do Jogo',\n 'localizacao':'Localização do Jogo'\n }\n\n#MARCACAO\nclass MarcacaoForm(forms.ModelForm):\n\n class Meta:\n model = Marcacao\n fields = ('n_jogo', 'n_jogador', 'minuto', 'descricao')\n labels = {\n 'n_jogo':'Jogo',\n 'n_jogador':'Jogador',\n 'minuto':'Minuto da Marcação',\n 'descricao':'Descrição da Marcação'\n }\n#EPOCA\nclass EpocaForm(forms.ModelForm):\n\n class Meta:\n model = Epoca\n fields = ('n_campeonato', 'n_jogo', 'ano')\n labels = {\n 'n_campeonato':'Campeonato',\n 'n_jogo':'Jogo',\n 'ano':'Ano'\n }\n#CONVOCATORIA\nclass ConvocatoriaForm(forms.ModelForm):\n\n class Meta:\n model = Convocatoria\n 
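        # Listing fields here also fixes the order in which the form renders them.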
fields = ('n_jogo', 'n_equipa', 'n_jogador', 'posicao', 'suplente')\n labels = {\n 'n_jogo':'Jogo',\n 'n_equipa':'Equipa',\n 'n_jogador':'Jogador',\n 'posicao':'Posição',\n 'suplente':'Suplente'\n }\n#RESULTADO\nclass ResultadoForm(forms.ModelForm):\n\n class Meta:\n model = ResultadoJogo\n fields = ('n_jogo', 'equipa_a', 'equipa_b', 'pontuacao_a', 'pontuacao_b')\n labels = {\n 'n_jogo':'Jogo',\n 'equipa_a':'Equipa A',\n 'equipa_b':'Equipa B',\n 'pontuacao_a':'Pontuação da Equipa A',\n 'pontuacao_b':'Pontuação da Equipa B'\n }\n#PONTUACAO\nclass PontuacaoForm(forms.ModelForm):\n\n class Meta:\n model = Pontuacao\n fields = ('n_equipa', 'n_campeonato', 'pontuacao_total')\n labels = {\n 'n_equipa':'Equipa',\n 'n_campeonato':'Campeonato',\n 'pontuacao_total':'Pontuação Total'\n }\n#ACAO\nclass AcaoForm(forms.ModelForm):\n\n class Meta:\n model = AcaoDisciplinar\n fields = ('descricao', 'tipo', 'jogador', 'jogo')\n labels = {\n 'descricao':'Descrição da Ação',\n 'tipo':'Tipo de Ação',\n 'jogador':'Jogador',\n 'jogo':'Jogo'\n }\n\n#TIPO ACAO\nclass TipoAcaoForm(forms.ModelForm):\n\n class Meta:\n model = TipoAcao\n fields = ('descricao', 'modalidade')\n labels = {\n 'descricao':'Descrição da Ação',\n 'modalidade':'Modalidade'\n }\n\n\n\n#SUBSTITUICAO\nclass SubstituicaoForm(forms.ModelForm):\n\n class Meta:\n model = Substituicao\n fields = ('jogo', 'jogador_entra', 'jogador_sai')\n labels = {\n 'jogo':'Jogo',\n 'jogador_entra':'Jogador Que Entra',\n 'jogador_sai':'Jogador Que Sai'\n }\n#CAMPEONATO\nclass CampeonatoForm(forms.ModelForm):\n\n class Meta:\n model = Campeonato\n fields = ('nome_campeonato',)\n labels = {\n 'nome_campeonato':'Nome Campeonato'\n }\n#MODALIDADE\nclass ModalidadeForm(forms.ModelForm):\n\n class Meta:\n model = Modalidade\n fields = ('nome_modalidade', 'tipo_pontuacao')\n labels = {\n 'nome_modalidade':'Nome Modalidade',\n 'tipo_pontuacao':'Tipo de Pontuação'\n }\n#TIPO PONTUACAO\nclass TipoPontuacaoForm(forms.ModelForm):\n\n class Meta:\n model = TipoPontuacao\n fields = ('descricao_tipo_pontuacao','vitoria','derrota','empate')\n labels = {\n 'descricao_tipo_pontuacao':'Descrição',\n 'vitoria':'Pontos Em Caso de Vitória',\n 'derrota':'Pontos Em Caso de Derrota',\n 'empate':'Pontos Em Caso de Empate'\n }\n#FAIXA ETARIA\nclass FaixaEtariaForm(forms.ModelForm):\n\n class Meta:\n model = FaixaEtaria\n fields = ('designacao',)\n labels = {\n 'designacao':'Designação'\n }\n\n","repo_name":"rutepenetra/Django-App","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10840533703","text":"\"\"\"\nCreate Button Posts imported from uniborg\nmodified for DarkCobra By jarvis210904\n\"\"\"\nimport os\nimport re \nfrom .. 
import CMD_HELP\nfrom telethon import events, Button\nfrom ..utils import admin_cmd, edit_or_reply\n\n# regex obtained from: https://github.com/PaulSonOfLars/tgbot/blob/master/tg_bot/modules/helper_funcs/string_handling.py#L23\nBTN_URL_REGEX = re.compile(r\"(\\[([^\\[]+?)\\]\\)\")\n\n@borg.on(admin_cmd(pattern=r\"cbutton(?: |$)(.*)\", outgoing=True))\nasync def _(event):\n chat = event.chat_id\n reply_message = await event.get_reply_message()\n if reply_message:\n markdown_note = reply_message.text\n else:\n markdown_note = event.pattern_match.group(1)\n prev = 0\n note_data = \"\"\n buttons = []\n for match in BTN_URL_REGEX.finditer(markdown_note):\n # Check if btnurl is escaped\n n_escapes = 0\n to_check = match.start(1) - 1\n while to_check > 0 and markdown_note[to_check] == \"\\\\\":\n n_escapes += 1\n to_check -= 1\n # if even, not escaped -> create button\n if n_escapes % 2 == 0:\n # create a thruple with button label, url, and newline status\n buttons.append((match.group(2), match.group(3), bool(match.group(4))))\n note_data += markdown_note[prev:match.start(1)]\n prev = match.end(1)\n # if odd, escaped -> move along\n else:\n note_data += markdown_note[prev:to_check]\n prev = match.start(1) - 1\n else:\n note_data += markdown_note[prev:]\n message_text = note_data.strip()\n tl_ib_buttons = build_keyboard(buttons)\n tgbot_reply_message = None\n if reply_message:\n if reply_message.media:\n tgbot_reply_message = await borg.download_media(reply_message.media)\n await tgbot.send_message(\n entity=chat,\n message=message_text,\n parse_mode=\"html\",\n file=tgbot_reply_message,\n link_preview=False,\n buttons=tl_ib_buttons,\n silent=True\n )\n await event.delete()\n if tgbot_reply_message:\n os.remove(tgbot_reply_message)\n \n# Helpers\ndef build_keyboard(buttons):\n keyb = []\n for btn in buttons:\n if btn[2] and keyb:\n keyb[-1].append(Button.url(btn[0], btn[1]))\n else:\n keyb.append([Button.url(btn[0], btn[1])])\n return keyb\n\nCMD_HELP.update({\n \"button\":\n \"**SYNTAX : **`.cbutton`\\\n \\n**USAGE :** Buttons must be in th format as [name on button] and narddown is Default to html\\\n \\n**EXAMPLE :** `.cbutton test [google] [BarkCobra] [support]`\\\n \"\n})\n","repo_name":"THESANSKARILADKA/Sanskari","sub_path":"userbot/plugins/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"35865829489","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\npos_x = []\npos_y = []\nfor _ in range(n):\n x, y = map(int, input().split())\n pos_x.append(x)\n pos_y.append(y)\n\npos_x = sorted(pos_x)\npos_y = sorted(pos_y)\n\nmid = n // 2\nmid_x = pos_x[mid]\nmid_y = pos_y[mid]\n\ndist = 0\nfor i in range(n):\n dist += abs(pos_x[i] - mid_x) + abs(pos_y[i] - mid_y)\n\nprint(dist)\n","repo_name":"combiJihoon/AlgorithmStudy","sub_path":"greedy/14400.py","file_name":"14400.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"346176494","text":"from collections import Counter\nimport numpy as np\nfrom gensim.models import word2vec\nimport pickle\n\n\nclass data_utils:\n\n def __init__(self, model_to_load, emb_dim, nb_conc_words, nb_words_dictionary, start_placeholder, end_placeholder,\n pad_placeholder, unk_placeholder):\n\n self.embedding_dimensions = emb_dim\n self.max_nb_conc_words = nb_conc_words\n self.sentence_beginning = start_placeholder\n 
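        # These placeholder tokens are wrapped around every sentence during preprocessing.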
self.sentence_end = end_placeholder\n self.padding_placeholder = pad_placeholder\n self.unknown = unk_placeholder\n self.max_nb_words_dictionary = nb_words_dictionary\n self.model_to_load = model_to_load\n\n def word_2_vec(self):\n\n if not self.model_to_load:\n self.model_w2v = word2vec.Word2Vec(self.wrapped_sentences, size=self.embedding_dimensions)\n print(\"w2v model created according to the vocabulary\")\n else:\n self.model_w2v = None\n\n def check_for_unknown_words(self, sentence, nb_words):\n\n new_sentence = []\n vocabulary = self.vocabulary\n\n for i in range(0, nb_words):\n if sentence[i] in vocabulary:\n new_sentence.append(sentence[i])\n else:\n new_sentence.append(self.unknown)\n\n return new_sentence\n\n def wrapper_test_sentence_words(self):\n\n \"\"\"Use a special sentence-beginning symbol and a sentence-end symbol \n (please use exactly these, including brackets). The symbol is the input, \n when predicting the first word and the symbol you require your model \n to predict at the end of every sentence.\"\"\"\n\n print(\"Starting to wrap the sentences appropriately..\")\n self.wrapped_sentences = []\n\n total_unknown = 0\n\n for sentence in self.tokens_per_sentence:\n\n nb_words = len(sentence)\n padding_needed = 0\n\n \"\"\"TODO: check if nb_words+2 or +1 is sufficient (count eos and bos ?)\"\"\"\n if nb_words + 2 <= self.max_nb_conc_words:\n # needed padding in the sentence\n padding_needed = self.max_nb_conc_words - nb_words - 2\n\n wrapped_sentence = []\n wrapped_sentence.append(self.sentence_beginning)\n sentence = self.check_for_unknown_words(sentence, nb_words)\n wrapped_sentence.extend(sentence)\n wrapped_sentence.append(self.sentence_end)\n wrapped_sentence.extend(self.padding_placeholder for i in range(0, padding_needed))\n self.wrapped_sentences.append(wrapped_sentence)\n\n print(\"Finished preprocessing test sentences\")\n\n def wrapper_train_sentence_words(self):\n\n \"\"\"Use a special sentence-beginning symbol and a sentence-end symbol \n (please use exactly these, including brackets). The symbol is the input, \n when predicting the first word and the symbol you require your model \n to predict at the end of every sentence. Finally use for words not in the vocabulary\"\"\"\n\n print(\"Starting to wrap the sentences appropriately..\")\n self.wrapped_sentences = []\n\n total_unknown = 0\n\n for sentence in self.tokens_per_sentence:\n\n nb_words = len(sentence)\n padding_needed = 0\n\n if nb_words + 2 <= self.max_nb_conc_words:\n # needed padding in the sentence\n padding_needed = self.max_nb_conc_words - nb_words - 2\n\n wrapped_sentence = []\n wrapped_sentence.append(self.sentence_beginning)\n\n new_sentence = self.check_for_unknown_words(sentence, nb_words)\n\n wrapped_sentence.extend(new_sentence)\n wrapped_sentence.append(self.sentence_end)\n wrapped_sentence.extend(self.padding_placeholder for i in range(0, padding_needed))\n\n self.wrapped_sentences.append(wrapped_sentence)\n # print(wrapped_sentence)\n print(\"Total sentences considered for training \", len(self.wrapped_sentences))\n print(\"Finished preprocessing\")\n\n def wrapper_eval_sentence_words(self):\n\n \"\"\"Use a special sentence-beginning symbol and a sentence-end symbol \n (please use exactly these, including brackets). 
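Concretely, these markers are the start and end placeholders passed to the constructor.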
The symbol is the input,\n when predicting the first word and the symbol you require your model\n to predict at the end of every sentence.\"\"\"\n\n print(\"Starting to wrap the sentences appropriately..\")\n self.wrapped_sentences = []\n\n total_unknown = 0\n\n for sentence in self.tokens_per_sentence:\n nb_words = len(sentence)\n padding_needed = self.max_nb_conc_words - nb_words - 2\n\n wrapped_sentence = []\n wrapped_sentence.append(self.sentence_beginning)\n sentence = self.check_for_unknown_words(sentence, nb_words)\n wrapped_sentence.extend(sentence)\n wrapped_sentence.append(self.sentence_end)\n wrapped_sentence.extend(self.padding_placeholder for i in range(0, padding_needed))\n self.wrapped_sentences.append(wrapped_sentence)\n\n print(\"Finished preprocessing eval sentences\")\n\n#\n# def do_sanity_checks():\n# print(\"Total sentences \", len(self.wrapped_sentences))\n#\n# count = 0\n# total = 0\n# for sentence in self.wrapped_sentences:\n# for i in range(0, len(sentence)):\n# total = total + 1\n# if sentence[i] in self.vocabulary:\n# count = count + 1\n#\n# print(\"Sanity checks on the dataset, words in vocabulary of 20k words\")\n# print(\"Words found in vocabulary \", count)\n# print(\"Total in vocab \", total)\n# print(\"Total words found not in vocabulary \", total_unknown)\n\n\n def reduce_dictionary(self):\n words_values = self.vocabulary\n\n sorted_words_values = sorted(words_values.items(), key=lambda x: x[1])\n total_distinct_words = len(sorted_words_values)\n start_index = total_distinct_words - self.max_nb_words_dictionary\n\n if total_distinct_words > self.max_nb_words_dictionary:\n sorted_words_values = sorted_words_values[start_index + 4:total_distinct_words]\n\n self.vocabulary = dict(sorted_words_values)\n self.vocabulary[self.sentence_beginning] = 1\n self.vocabulary[self.sentence_end] = 1\n self.vocabulary[self.unknown] = 1\n self.vocabulary[self.padding_placeholder] = 1\n\n self.vocabulary = self.vocabulary\n self.vocabulary_words_list = list(self.vocabulary.keys())\n\n print(\"Vocabulary has been defined, its size is \", len(self.vocabulary))\n\n\n def define_dictionary(self):\n total_dictionary = {'key': 'value'}\n\n total_dictionary = Counter(self.tokenized_sentences)\n\n self.vocabulary = total_dictionary\n print(\"Dictionary has been defined\")\n\n\n def string_tokenizer(self, corpus):\n tokenized_sentences = []\n tokens_per_sentence = []\n array_of_words = []\n\n for sentence in corpus:\n array_of_words = sentence.split(\" \")\n tokenized_sentences.extend(array_of_words)\n tokens_per_sentence.append(array_of_words)\n\n self.tokenized_sentences = tokenized_sentences\n self.tokens_per_sentence = tokens_per_sentence\n print(\"Strings have been tokenized...\")\n\n\n def load_train_data(self, path_to_file):\n print(\"Loading train file...\")\n\n with open(path_to_file) as f:\n content = f.readlines()\n\n print(\"Starting the preprocessing..\")\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n self.string_tokenizer([x.strip(\"\\n\") for x in content])\n self.define_dictionary()\n self.reduce_dictionary()\n self.wrapper_train_sentence_words()\n # self.do_sanity_checks()\n self.word_2_vec()\n\n with open(\"vocabulary.pkl\", 'wb') as output:\n pickle.dump(self.vocabulary, output, pickle.HIGHEST_PROTOCOL)\n print(\"Data utils saved as pkl\")\n\n return self.model_w2v, self.wrapped_sentences\n\n\n def load_eval_data(self, path_to_file, vocabulary_file_path):\n print(\"Loading test file...\")\n\n with open(path_to_file) as 
f:\n content = f.readlines()\n\n self.load_vocabulary(vocabulary_file_path)\n\n print(\"Starting the preprocessing..\")\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n self.string_tokenizer([x.strip(\"\\n\") for x in content])\n self.wrapper_eval_sentence_words()\n # self.do_sanity_checks()\n\n return self.wrapped_sentences, list(self.vocabulary_words_list)\n\n\n def load_test_data(self, path_to_file, vocabulary_file_path):\n print(\"Loading test file...\")\n\n with open(path_to_file) as f:\n content = f.readlines()\n\n self.load_vocabulary(vocabulary_file_path)\n\n print(\"Starting the preprocessing..\")\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n self.string_tokenizer([x.strip(\"\\n\") for x in content])\n self.wrapper_test_sentence_words()\n # self.do_sanity_checks()\n\n return self.wrapped_sentences, list(self.vocabulary_words_list)\n\n\n def load_vocabulary(self, pickle_filename):\n with open(pickle_filename, 'rb') as handle:\n self.vocabulary = pickle.load(handle)\n self.vocabulary_words_list = list(self.vocabulary.keys())\n # print(list(self.vocabulary))\n\n return self.vocabulary\n","repo_name":"robertah/nlu_project","sub_path":"task1/data_utilities.py","file_name":"data_utilities.py","file_ext":"py","file_size_in_byte":9657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39117684492","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = requests.get('https://www.google.com.br/imghp?hl=pt-BR&tab=wi')\nsoup = BeautifulSoup(url.text,'lxml')\n\n'''----------------------------Extraindo Links------------------------------------'''\n\ndef extrair_links(content): \n links = set() # set() tipo uma lista porem faz um agregado de informação sem uma ordem\n\n for tag in soup.find_all('a', href=True):\n if tag['href'].startswith('http'):\n links.add(tag['href'])\n print(links)\n return links\n \n\n \n \nlinks = extrair_links(url.text)","repo_name":"rafamessis/Projetos_online","sub_path":"Projetos_Online/RMS_solution/Teste_pagina_link.py","file_name":"Teste_pagina_link.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40548484732","text":"# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
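The inserts just below make the monkey packages importable for autodoc.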
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom typing import Dict\n\nsys.path.insert(0, os.path.abspath(\"../../../\"))\nsys.path.insert(0, os.path.abspath(\"../../../monkey/\"))\nsys.path.insert(0, os.path.abspath(\"../../../monkey/common\"))\nsys.path.insert(0, os.path.abspath(\"../../../monkey/infection_monkey/\"))\nsys.path.insert(0, os.path.abspath(\"../../../monkey/monkey_island/\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Infection Monkey\"\ncopyright = \"2022, Akamai Ltd\"\nauthor = \"Akamai Ltd\"\n\n# The short X.Y version\nversion = \"\"\n# The full version, including alpha/beta/rc tags\nrelease = \"0.0.0\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\", # automatic documentation of single docstring\n \"sphinx.ext.autosummary\", # using autodoc automatic documentation of whole modules\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.intersphinx\", # references to external documentation\n \"sphinx.ext.extlinks\",\n \"sphinx_rtd_theme\", # ReadTheDocs theme\n \"sphinx.ext.inheritance_diagram\", # used for showing the inheritence diagram\n]\nautosummary_generate = True\nautosummary_imported_members = False\nautodoc_member_order = \"groupwise\"\n\nautodoc_mock_imports = [\n # \"flask\",\n # \"netifaces\",\n # `\"psutil\",\n # \"flask_restful\",\n # \"impacket\",\n # \"paramiko\",\n # \"gridfs\",\n # \"flask_pymongo\",\n # \"pypsrp\",\n \"pymongo\",\n # \"ring\",\n # \"botocore\",\n # \"flask_jwt_extended\",\n # \"pypykatz\",\n # \"spnego\",\n # \"jwt\",\n # \"bcrypt\",\n # \"Crypto\",\n # \"twisted\",\n # \"pymssql\",\n # \"nmb\",\n # \"odict\",\n # \"pyAesCrypt\",\n # \"dpath\",\n \"gevent\",\n \"ntsecuritycon\",\n # \"bson\",\n \"win32api\",\n # \"werkzeug\",\n # \"jsonschema\",\n # \"boto3\",\n # \"dateutil\",\n \"win32con\",\n \"win32security\",\n]\n\n# Mappings for sphinx.ext.intersphinx. Projects have to have Sphinx-generated doc! (.inv file)\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\nautoclass_content = \"both\" # Add __init__ doc (ie. params) to class summaries\nhtml_show_sourcelink = False # Remove 'view source code' from top of page (for html, not python)\nautodoc_inherit_docstrings = True # If no docstring, inherit from base class\nset_type_checking_flag = True # Enable 'expensive' imports for sphinx_autodoc_typehints\nautodoc_typehints = \"both\" # Sphinx-native. Not as good as sphinx_autodoc_typehints\nadd_module_names = False # Remove namespaces from class/method signatures\nhtml_show_sphinx = False # Shows \"Build with Sphinx and RTD schem text at footer\"\nhtml_show_copyright = True # Shows copyright using the company and author specified above\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"*node_modules*\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_favicon = \"_static/images/favicon.ico\"\nhtml_logo = \"_static/images/logo.gif\"\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo_only\": \"true\"}\n\n# \"Edit on Github\" button\nhtml_context = {\n \"display_github\": True, # Integrate GitHub\n \"github_user\": \"guardicore\", # Username\n \"github_repo\": \"monkey\", # Repo name\n \"github_version\": \"develop\", # Version\n \"conf_py_path\": \"/monkey/monkey_island/docs/source/\", # Path in the checkout to the docs root\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# Adding CSS files to make it more like the Hugo documentation\n\nhtml_css_files = [\n \"css/all.css\",\n \"css/all.min.css\",\n \"css/bootstrap-grid.min.css\",\n \"css/bootsrap-grid.min.css.map\",\n \"css/brands.css\",\n \"css/brands.min.css\",\n \"css/fontawesome.css\",\n \"css/fontawesome.min.css\",\n \"css/labels.css\",\n \"css/regular.css\",\n \"css/regular.min.css\",\n \"css/shadow_around_images.css\",\n \"css/solid.css\",\n \"css/solid.min.css\",\n \"css/svg-with-js.css\",\n \"css/svg-with-js.min.css\",\n \"css/v4-shims.css\",\n \"css/v4-shims.min.css\",\n \"css/custom.css\",\n]\n\n# TODO: Investigate if we really need them,\n# html_js_files = [\"js/bootstrap.min.js\", \"js/bootstrap.min.js.map\"]\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"InfectionMonkeydoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements: Dict[str, str] = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
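Each tuple below yields one LaTeX document when building the latex target.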
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"InfectionMonkey.tex\", \"Infection Monkey Documentation\", \"Akamai Ltd\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"infectionmonkey\", \"Infection Monkey Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"InfectionMonkey\",\n \"Infection Monkey Documentation\",\n author,\n \"InfectionMonkey\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\n# -- Extension configuration -------------------------------------------------\n","repo_name":"guardicore/monkey","sub_path":"monkey/monkey_island/docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":8804,"program_lang":"python","lang":"en","doc_type":"code","stars":6367,"dataset":"github-code","pt":"21"} +{"seq_id":"20138497011","text":"import os\n\nWINDOW_WIDTH = 900\nWINDOW_HEIGHT = 400\nWINDOW_POS_RIGHT = 400\nWINDOW_POS_LEFT = 200\n\nCOLOR_BG = 'gray'\nCOLOR_YELLOW = 'yellow'\nCOLOR_RED = 'red'\n\nPATH_DIR = os.path.dirname(__file__)\nPATH_IMG = os.path.join(PATH_DIR, 'images')","repo_name":"TienManh308/Tin","sub_path":"python/tkinter/01_change_colors/soucres/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43937360100","text":"# This file is taken from emesene2 git.\n# \n# Small changes were made to make it work with Okeykoclient by NickCis\n#\n# Taken from emesene:\n# Emesene is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\nimport gtk\nimport pango\nimport gobject\n\n#import Renderers\n#import extension\n\nclass TextField(gtk.VBox):\n '''this class represent a widget that is a button and when clicked\n it shows a textfield until the text is set, then the button appears again'''\n\n __gsignals__ = {\n 'text-changed': (gobject.SIGNAL_RUN_LAST, \n gobject.TYPE_NONE, \n (gobject.TYPE_PYOBJECT,gobject.TYPE_PYOBJECT))\n }\n\n def __init__(self, text, empty_text, allow_empty):\n '''class constructor, text is the text to show, empty_text is the\n text to display when no text is entered, allow_empty is a boolean\n that indicates if the user can enter an empty string'''\n gtk.VBox.__init__(self)\n\n self.entry = gtk.Entry()\n #self.label = Renderers.SmileyLabel()\n self.label = gtk.Label()\n #self.label.set_ellipsize(pango.ELLIPSIZE_END)\n 
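        # The button displays the current text; clicking it swaps in the entry for editing.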
self.button = gtk.Button()\n self.button.set_alignment(0.0, 0.5)\n self.button.set_relief(gtk.RELIEF_NONE)\n\n self._enabled = True\n\n self._text = text\n self.empty_text = empty_text\n self.allow_empty = allow_empty\n\n self.pack_start(self.button, True, True)\n self.pack_start(self.entry, True, True)\n\n self.button.add(self.label)\n #self.label.set_markup(self._text or self.empty_text)\n self.label.set_text(self._text or self.empty_text)\n\n self.button.connect('clicked', self.on_button_clicked)\n self.entry.connect('activate', self.on_entry_activate)\n self.entry.connect('focus-out-event', self._on_focus_out)\n self.entry.set_no_show_all(True)\n\n def on_button_clicked(self, button):\n '''method called when the button is clicked'''\n self.button.hide()\n self.entry.show()\n self.entry.grab_focus()\n\n def on_entry_activate(self, entry):\n '''method called when the user press enter on the entry''' \n # dialog = extension.get_default('dialog')\n #if not self.entry.get_text() and not self.allow_empty:\n # dialog.error(_(\"Empty text not allowed\"))\n # return\n\n new_text = self.entry.get_text()\n\n if new_text != self._text:\n old_text = self._text\n self.text = self.entry.get_text()\n self.emit('text-changed', old_text, self._text)\n\n self.entry.hide()\n self.button.show()\n\n def _on_focus_out(self, widget, event):\n '''called when the widget lost the focus'''\n self.on_entry_activate(self.entry)\n\n def show(self):\n '''override show'''\n gtk.VBox.show(self)\n self.button.show()\n self.entry.hide()\n self.label.show()\n\n def show_all(self):\n '''override the show method to not show both widgets on a show_all\n call'''\n self.show()\n\n def _get_text(self):\n '''return the value of text'''\n return self._text\n\n def _set_text(self, value):\n '''set the value of text'''\n self._text = value\n #self.label.set_markup(Renderers.msnplus_to_list(gobject.markup_escape_text(self._text)))\n self.label.set_text(self._text)\n self.entry.set_text(self._text)\n\n text = property(fget=_get_text, fset=_set_text)\n\n def _set_enabled(self, value):\n '''set the value of enabled and modify the widgets to reflect the status\n '''\n self._enabled = value\n self.button.set_sensitive(value)\n\n def _get_enabled(self):\n '''return the value of the enabled property\n '''\n\n return self._enabled\n\n enabled = property(fget=_get_enabled, fset=_set_enabled)\n\n","repo_name":"NickCis/Okeykoclient","sub_path":"okeykoclient/gui/gtkui/TextField.py","file_name":"TextField.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30290234211","text":"import duckdb\nimport pandas as pd\n\nfrom pysdql.query.tpch.const import (\n DATAPATH,\n LINEITEM_COLS,\n ORDERS_COLS,\n CUSTOMER_COLS,\n NATION_COLS,\n REGION_COLS,\n PART_COLS,\n SUPPLIER_COLS,\n PARTSUPP_COLS,\n\n LINEITEM_TYPE,\n ORDERS_TYPE,\n CUSTOMER_TYPE,\n NATION_TYPE,\n REGION_TYPE,\n PART_TYPE,\n SUPPLIER_TYPE,\n PARTSUPP_TYPE,\n)\n\nfrom pysdql.query.tpch.Qpostgres.postgresT import (\n tpch_q13,\n)\n\nfrom pysdql.query.tpch.Qpandas.pandasQ import *\n\nfrom pysdql.query.util import compare_dataframe, pandas_to_df\n\n# show all columns\npd.set_option('display.max_columns', None)\n\nif __name__ == '__main__':\n\n # lineitem = pd.read_csv(rf'{DATAPATH}/lineitem.tbl', sep='|', index_col=False, header=None,\n # names=LINEITEM_COLS,\n # dtype=LINEITEM_TYPE,\n # parse_dates=['l_shipdate', 'l_commitdate', 'l_receiptdate'])\n customer = 
pd.read_csv(rf'{DATAPATH}/customer.tbl', sep='|', index_col=False, header=None, names=CUSTOMER_COLS)\n orders = pd.read_csv(rf'{DATAPATH}/orders.tbl', sep='|', index_col=False, header=None, names=ORDERS_COLS,\n parse_dates=['o_orderdate'])\n # nation = pd.read_csv(rf'{DATAPATH}/nation.tbl', sep='|', index_col=False, header=None, names=NATION_COLS)\n # supplier = pd.read_csv(rf'{DATAPATH}/supplier.tbl', sep='|', index_col=False, header=None, names=SUPPLIER_COLS)\n # part = pd.read_csv(rf'{DATAPATH}/part.tbl', sep='|', index_col=False, header=None, names=PART_COLS)\n # partsupp = pd.read_csv(rf'{DATAPATH}/partsupp.tbl', sep='|', index_col=False, header=None, names=PARTSUPP_COLS)\n\n # pd_result = tpch_q13(customer, orders)\n #\n # duck_conn = duckdb.connect(database=':memory:')\n #\n # duck_result = duck_conn.execute(duck_q13).df()\n #\n # compare_dataframe(pd_result, duck_result, verbose=True)\n #\n # print(duck_result)\n\n res1 = orders[~((orders['o_comment'].str.find('special') != -1)\n & (orders['o_comment'].str.rfind('requests') > (orders['o_comment'].str.find('special') + 6)))]\n\n res2 = orders[~(orders.o_comment.str.contains(\"^.*?special.*?requests.*?$\", regex=True))]\n\n print(res1.shape, res2.shape)\n\n # compare_dataframe(res1, res2, verbose=True)","repo_name":"Unka-Malloc/Pandas-Trim","sub_path":"demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"18220516245","text":"from ....ggettext import gettext as _\n\n#-------------------------------------------------------------------------\n#\n# GRAMPS modules\n#\n#-------------------------------------------------------------------------\nfrom ....datehandler import parser\nfrom ....lib.eventtype import EventType\nfrom .. 
import Rule\n\n#-------------------------------------------------------------------------\n#\n# HasData\n#\n#-------------------------------------------------------------------------\nclass HasData(Rule):\n \"\"\"Rule that checks for an event containing particular values\"\"\"\n\n labels = [ _('Event type:'), _('Date:'), _('Place:'),\n _('Description:') ]\n name = _('Events with <data>')\n description = _(\"Matches events with data of a particular value\")\n category = _('General filters')\n \n def __init__(self, list):\n Rule.__init__(self, list)\n\n self.event_type = self.list[0]\n self.date = self.list[1]\n self.place = self.list[2]\n self.description = self.list[3]\n\n if self.event_type:\n self.event_type = EventType()\n self.event_type.set_from_xml_str(self.list[0])\n\n if self.date:\n self.date = parser.parse(self.date)\n \n def apply(self, db, event):\n if self.event_type and event.get_type() != self.event_type:\n # No match\n return False\n\n ed = event.get_description().upper()\n if self.description and ed.find(self.description.upper()) == -1:\n # No match\n return False\n\n if self.date and not event.get_date_object().match(self.date):\n # No match\n return False\n\n if self.place:\n pl_id = event.get_place_handle()\n if pl_id:\n pl = db.get_place_from_handle(pl_id)\n pn = pl.get_title().upper()\n if pn.find(self.place.upper()) == -1:\n # No match\n return False\n else:\n # No place attached to event\n return False\n\n # All conditions matched\n return True\n","repo_name":"arunkgupta/gramps","sub_path":"gramps/gen/filters/rules/event/_hasdata.py","file_name":"_hasdata.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"17057112853","text":"import copy\nimport datetime\nimport logging\nimport re\nimport salt.exceptions\nimport salt.utils.event\nimport sys\nimport threading\nimport threading_more\nimport time\nimport urlparse\nimport uuid\n\nfrom common_util import ensure_primitive, last_iter, force_kwargs\nfrom salt_more import cached_loader, SuperiorCommandExecutionError\nfrom timeit import default_timer as timer\n\n\nlog = logging.getLogger(__name__)\n\nDEBUG = log.isEnabledFor(logging.DEBUG)\n\n\nclass MessageProcessor(object):\n \"\"\"\n Native hooks:\n - 'worker': TODO\n - 'workflow': TODO\n\n Built-in workers:\n - 'shared': Workflow is performed by the current message processor thread.\n - 'dedicated': Workflow is performed by a dedicated thread.\n\n Built-in workflows and their additional hooks:\n - 'simple': <handler> --> [trigger] --> [filter] --> [returner]\n - 'extended': [validator] (--> [returner]) --> <handler> --> [converter] --> [trigger] --> [filter] --> [enricher] --> [returner]\n - 'manage': Special workflow for management stuff. 
See implementation for details.\n\n NOTES:\n - All hook functions registered using the hook decorator are synchronized before invocation to ensure thread safety.\n - If you add your own hook methods by inheriting from this class you are responsible for thread synchronization.\n \"\"\"\n\n def __init__(self, default_hooks={}):\n self._default_hooks = default_hooks\n self._hook_funcs = {} # Index of all registered hook functions\n self._hook_lock = threading.RLock() # Used to synchronize hook function calls\n self._measure_stats = False\n\n self.worker_threads = threading_more.ThreadRegistry() # Keeps track of all active workers\n\n # Set default hooks that will be used if none specified\n if not \"worker\" in self._default_hooks:\n self._default_hooks[\"worker\"] = \"shared\"\n if not \"workflow\" in self._default_hooks:\n self._default_hooks[\"workflow\"] = \"simple\"\n\n @property\n def measure_stats(self):\n return self._measure_stats\n\n @measure_stats.setter\n def measure_stats(self, value):\n self._measure_stats = value\n\n def register_hook(self, synchronize=True):\n \"\"\"\n Decorator to register hook functions for this message processor.\n\n Args:\n synchronize (bool): Enables thread synchronization for entire hook function.\n \"\"\"\n\n def decorator(func):\n ret_func = func\n\n # Wrap in synchronizer if requested\n if synchronize:\n ret_func = self._synchronize_wrapper(self._hook_lock, func)\n\n # Add function to hook registry\n name = func.__name__\n self._hook_funcs[name] = ret_func\n\n if DEBUG:\n log.debug(\"Registered hook function '%s'\", name)\n\n return ret_func\n\n return decorator\n\n def add_hook(self, name, kind, func, synchronize=True):\n \"\"\"\n Add hook function manually to this message processor.\n \"\"\"\n\n # Wrap in synchronizer if requested\n if synchronize:\n func = self._synchronize_wrapper(self._hook_lock, func)\n\n self._hook_funcs[\"{:}_{:}\".format(name, kind)] = func\n\n def process(self, message):\n \"\"\"\n Process a single message.\n \"\"\"\n\n # Find worker and use it\n func, settings = self._get_hook_for(message, \"worker\", parse_settings=True)\n result = func(message, **settings)\n\n return result\n\n #region Available workers\n\n def shared_worker(self, message, **settings):\n \"\"\"\n Run workflow in current thread.\n \"\"\"\n\n found, result = self._call_hook_for(message, \"workflow\", message)\n\n return result\n\n def dedicated_worker(self, message, **settings):\n \"\"\"\n Run workflow in a dedicated thread.\n \"\"\"\n\n # Check if we need to dequeue message from an existing worker thread\n if \"dequeue\" in settings:\n threads = self.worker_threads.do_for_all_by(settings[\"dequeue\"],\n lambda t: t.context[\"messages\"].remove(message))\n\n return {\n \"dequeued\": [t.name for t in threads]\n }\n\n # Check if we need to enqueue message to an existing worker thread\n if \"enqueue\" in settings:\n threads = self.worker_threads.do_for_all_by(settings[\"enqueue\"],\n lambda t: t.context[\"messages\"].append(message))\n\n return {\n \"enqueued\": [t.name for t in threads]\n }\n\n # Exceptions will NOT kill worker thread as default behavior\n suppress_exceptions = settings.pop(\"suppress_exceptions\", True)\n\n # Terminates worker thread after a successful run without warnings nor exceptions\n kill_upon_success = settings.pop(\"kill_upon_success\", False)\n\n # Perform entire job iteration transactionally\n transactional = settings.pop(\"transactional\", False)\n\n # Prepare function that performs actual work\n def do_work(thread, context):\n 
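# Job function run on every worker iteration; processes all messages currently in the thread context\n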
success = True\n\n # Loop through all messages found in thread context\n for message in list(context[\"messages\"]): # Copy list to allow changes while looping\n try:\n\n # Run workflow\n self._call_hook_for(message, \"workflow\", message)\n\n except Warning as wa:\n success = False\n\n # Register time of last warning in context\n context[\"last_warning\"] = datetime.datetime.utcnow().isoformat()\n\n # Also register all distinct warning messages in context\n msg = str(wa)\n context.setdefault(\"distinct_warnings\", {}).setdefault(msg, 0)\n context[\"distinct_warnings\"][msg] += 1\n\n # Only allow recurring warnings to be logged every minute\n if context[\"distinct_warnings\"][msg] > 3 \\\n and timer() - getattr(thread, \"warning_log_timer\", 0) < 60:\n return\n setattr(thread, \"warning_log_timer\", timer())\n\n # Go ahead and log the warning\n if context[\"distinct_warnings\"][msg] > 1:\n log.warning(\"Recurring warning ({:} times) in worker thread '{:}': {:}\".format(context[\"distinct_warnings\"][msg], thread.name, wa))\n else:\n # No need to show all recurring warnings on warning level\n log.info(\"Warning in worker thread '{:}': {:}\".format(thread.name, wa))\n\n except Exception as ex:\n success = False\n\n # Register time of last error in context\n context[\"last_error\"] = datetime.datetime.utcnow().isoformat()\n\n # Also register all distinct error messages in context\n msg = str(ex)\n context.setdefault(\"distinct_errors\", {}).setdefault(msg, 0)\n context[\"distinct_errors\"][msg] += 1\n\n # Only allow recurring exceptions to be logged every minute\n if suppress_exceptions and context[\"distinct_errors\"][msg] > 3 \\\n and timer() - getattr(thread, \"exception_log_timer\", 0) < 60:\n return\n setattr(thread, \"exception_log_timer\", timer())\n\n # Go ahead and log the exception\n if context[\"distinct_errors\"][msg] > 1:\n log.exception(\"Recurring exception ({:} times) in worker thread '{:}' while running workflow for message: {:}\".format(context[\"distinct_errors\"][msg], thread.name, message))\n else:\n log.exception(\"Exception in worker thread '{:}' while running workflow for message: {:}\".format(thread.name, message))\n\n # Finally suppress or propagate the exception\n if suppress_exceptions:\n if transactional:\n log.info(\"Suppressing prior exception in worker thread '{:}' and skipping any following work\".format(thread.name))\n\n break\n else:\n log.info(\"Suppressing prior exception in worker thread '{:}' and continuing as normal\".format(thread.name))\n else:\n raise\n\n # Clear any warnings and errors on success\n if success:\n context.pop(\"distinct_warnings\", None)\n context.pop(\"distinct_errors\", None)\n\n if kill_upon_success and success:\n thread.kill()\n\n if DEBUG:\n log.debug(\"Killed worker thread '{:}' upon successful run\".format(thread.name))\n\n # Start immediately is default\n start = settings.pop(\"start\", True)\n\n # Add new worker thread\n thread = threading_more.WorkerThread(\n target=self._synchronize_wrapper(self._hook_lock, do_work) if transactional else do_work,\n context={\"messages\": [message] if message else []},\n registry=self.worker_threads, # Registers thread in registry\n **settings) # Pass additional settings\n\n if start:\n thread.start()\n\n return {\n \"started\": thread.name\n }\n\n return {\n \"created\": thread.name\n }\n\n #endregion\n\n #region Available workflows\n\n def simple_workflow(self, message):\n \"\"\"\n Simple message processing flow and available hooks:\n\n <handler> --> [trigger] --> [filter] --> [returner]\n \"\"\"\n\n
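# Unpack the message payload into positional and keyword arguments\n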
args = message.get(\"args\", [])\n kwargs = message.get(\"kwargs\", {})\n\n result = None\n try:\n\n # Call handler hook\n _, result = self._call_hook_for(message, \"handler\", *args, **kwargs)\n\n except Exception as ex:\n result = ex\n\n raise\n\n finally:\n\n # Always call trigger hook(s), also on error or empty result\n try:\n self._call_hooks_for(message, \"trigger\", result)\n except:\n pass # Already logged\n\n # Call filter hook (chain) if there is a result\n if result:\n found, filtered_result = self._call_hook_chain_for(message, \"filter\", result)\n if found:\n result = filtered_result\n\n # Call returner hook(s) if there is a result\n if result:\n self._call_hooks_for(message, \"returner\", message, result)\n\n return result\n\n def extended_workflow(self, message):\n \"\"\"\n Extended message processing flow and available hooks:\n\n [validator] --> <handler> --> [converter] --> [trigger] --> [filter] --> [enricher] --> [returner]\n \"\"\"\n\n args = message.get(\"args\", [])\n kwargs = message.get(\"kwargs\", {})\n\n # Call validator hook (chain)\n found, error = self._call_hook_chain_for(message, \"validator\", *args, **kwargs)\n if found and error:\n raise Exception(error)\n\n result = None\n try:\n\n # Call handler hook\n _, result = self._call_hook_for(message, \"handler\", *args, **kwargs)\n\n # Call converter hook (chain) if there is a result\n if result:\n found, converted_result = self._call_hook_chain_for(message, \"converter\", result)\n if found:\n result = converted_result\n\n except Exception as ex:\n result = ex\n\n raise\n\n finally:\n\n # Always call trigger hook(s), also on error or empty result\n try:\n self._call_hooks_for(message, \"trigger\", result)\n except:\n pass # Already logged\n\n # Call filter hook (chain) if there is a result\n if result:\n found, filtered_result = self._call_hook_chain_for(message, \"filter\", result)\n if found:\n result = filtered_result\n\n # Call enricher hook (chain) if there is a result\n if result:\n found, enriched_result = self._call_hook_chain_for(message, \"enricher\", result)\n if found:\n result = enriched_result\n\n # Call returner hook(s) if there is a result\n if result: # TODO HN: What should happen if one returner fails when there are multiple?\n self._call_hooks_for(message, \"returner\", message, result)\n\n return result\n\n def manage_workflow(self, message):\n \"\"\"\n Administration workflow to query and manage this processor instance.\n\n Supported commands:\n - hook list|call <name> [argument]... 
[<key>=<value>]...\n - worker list|show|create|start|pause|resume|start_or_resume|kill <name> [<key>=<value>]...\n - run <key>=<value>...\n \"\"\"\n\n args = message.get(\"args\", [])\n kwargs = message.get(\"kwargs\", {})\n\n if len(args) > 1 and args[0] == \"hook\":\n if args[1] == \"list\":\n return {\n \"values\": [h for h in self._hook_funcs]\n }\n\n elif args[1] == \"call\":\n res = self._get_func(args[2])(*args[3:], **kwargs)\n if isinstance(res, dict):\n return res.clone() # Avoid adding '_stamp' to original\n elif isinstance(res, (list, set, tuple)):\n return {\"values\": res}\n else:\n return {\"value\": res}\n\n elif len(args) > 1 and args[0] == \"worker\":\n if args[1] == \"list\":\n return {\n \"values\": [\n t.name for t in self.worker_threads.find_all_by(args[2] \\\n if len(args) > 2 else \"*\")\n ]\n }\n\n elif args[1] == \"show\":\n threads = self.worker_threads.find_all_by(args[2] if len(args) > 2 else \"*\")\n return {\n \"value\": {t.name: t.context for t in threads}\n }\n\n elif args[1] == \"create\":\n messages = args[2:] # all other arguments are considered messages\n\n # Create the worker\n res = self.dedicated_worker(None, **kwargs)\n\n # Resolve the worker name\n worker_name = None\n if res.get(\"started\", None):\n worker_name = res[\"started\"]\n elif res.get(\"created\", None):\n worker_name = res[\"created\"]\n else:\n raise Exception(\"Couldn't figure out worker name, skipping enqueueing of messages\")\n\n # Enqueue all messages to worker\n for message in messages:\n self.dedicated_worker(message, enqueue=worker_name)\n\n return {\n \"value\": worker_name,\n }\n\n elif args[1] == \"start\":\n threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.start(**kwargs))\n return {\n \"values\": [t.name for t in threads]\n }\n\n elif args[1] == \"pause\":\n threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.pause(**kwargs))\n return {\n \"values\": [t.name for t in threads]\n }\n\n elif args[1] == \"resume\":\n threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.resume(**kwargs))\n return {\n \"values\": [t.name for t in threads]\n }\n\n elif args[1] == \"start_or_resume\":\n threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.start_or_resume(**kwargs))\n return {\n \"values\": [t.name for t in threads]\n }\n\n elif args[1] == \"kill\":\n threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.kill(**kwargs))\n return {\n \"values\": [t.name for t in threads]\n }\n\n elif len(args) > 0 and args[0] == \"run\":\n msg = kwargs\n return self.process(msg)\n\n elif len(args) > 0 and args[0] == \"exit\":\n sys.exit()\n return {}\n\n raise Exception(\"Invalid or unknown command\")\n\n #endregion\n\n #region Private helpers\n\n def _call_hook_for(self, message, kind, *args, **kwargs):\n func = self._get_hook_for(message, kind)\n if func:\n return True, func(*args, **kwargs)\n\n return False, None\n\n def _call_hooks_for(self, message, kind, *args, **kwargs):\n errors = []\n\n funcs = self._get_hooks_for(message, kind)\n for func in funcs:\n try:\n func(*args, **kwargs)\n except Exception as ex:\n log.exception(\"Error when calling {:} hook for message: {:}\".format(kind, message))\n\n errors.append(ex)\n\n # Raise if error(s)\n if errors:\n raise Exception(\"Failed to call {:}/{:} {:} hook(s) for message: {:}\".format(len(errors), len(funcs), kind, message))\n\n def _call_hook_chain_for(self, message, kind, *args, **kwargs):\n ret = (False, None)\n\n for func in self._get_hooks_for(message, kind):\n res = func(*args, **kwargs)\n ret = (True, res)\n\n if res != None:\n break\n\n return 
ret\n\n def _get_hook_for(self, message, kind, parse_settings=False):\n url = self._get_hook_url_for(message, kind)\n if not url:\n return\n\n name = url\n\n # Parse settings from url if requsted\n if parse_settings:\n name, settings = self._parse_hook_url(url)\n\n # Get hook function by name\n func = self._get_func(\"{:s}_{:s}\".format(name, kind))\n\n # Wrap hook function in order to measure statistics\n if self._measure_stats:\n func = self._stats_wrapper_for(message, kind, func)\n\n if parse_settings:\n return (func, settings)\n else:\n return func\n\n def _get_hooks_for(self, message, kind):\n ret = []\n\n url = self._get_hook_url_for(message, kind)\n if not url:\n return ret\n\n for name in url.split(\",\"):\n\n # Get hook function by name\n func = self._get_func(\"{:s}_{:s}\".format(name, kind))\n\n # Wrap hook function in order to measure statistics\n if self._measure_stats:\n func = self._stats_wrapper_for(message, kind, func)\n\n ret.append(func)\n\n return ret\n\n def _stats_wrapper_for(self, message, kind, func):\n def stats_wrapper(*args, **kwargs):\n start = timer()\n\n try:\n return func(*args, **kwargs)\n finally:\n duration = timer() - start\n\n stats = message.setdefault(\"_stats\", {}).setdefault(kind, {\n \"duration\": {\n \"acc\": 0.0,\n \"avg\": 0.0,\n \"min\": -1.0,\n \"max\": -1.0\n },\n \"count\": 0\n })\n stats[\"count\"] += 1\n stats[\"duration\"][\"acc\"] += duration\n stats[\"duration\"][\"avg\"] = stats[\"duration\"][\"acc\"] / stats[\"count\"]\n if duration < stats[\"duration\"][\"min\"] or stats[\"duration\"][\"min\"] < 0:\n stats[\"duration\"][\"min\"] = duration\n if duration > stats[\"duration\"][\"max\"]:\n stats[\"duration\"][\"max\"] = duration\n\n return stats_wrapper\n\n def _parse_hook_url(self, url):\n u = urlparse.urlparse(url)\n\n name = u.path\n settings = {}\n\n if u.query:\n qs = urlparse.parse_qs(u.query, strict_parsing=True)\n\n for k, v in qs.iteritems():\n\n # Convert into appropriate types using eval (integers, decimals and booleans)\n v = [eval(e) if re.match(\"^(?:[-+]?\\d*\\.?\\d*|True|False)$\", e) else e for e in v]\n\n if len(v) == 1:\n settings[k] = v[0]\n else:\n settings[k] = v\n\n return (name, settings)\n\n def _get_hook_url_for(self, message, kind):\n return message.get(kind, self._default_hooks.get(kind, None))\n\n def _get_func(self, name):\n if name in self._hook_funcs:\n return self._hook_funcs[name]\n elif hasattr(self, name):\n return getattr(self, name)\n else:\n raise Exception(\"No function found for hook '{:}'\".format(name))\n\n def _synchronize_wrapper(self, lock, func):\n def synchronizer(*args, **kwargs):\n with lock:\n return func(*args, **kwargs)\n\n return synchronizer\n\n #endregion\n\n\nclass EventDrivenMessageProcessor(MessageProcessor):\n\n def __init__(self, namespace, context={}, default_hooks={}):\n MessageProcessor.__init__(self, default_hooks)\n self._namespace = namespace\n self._context = context\n self._tag_regex = re.compile(\"^{:s}/req/(?P.+)$\".format(namespace))\n self._event_matchers = []\n self._bus_lock = threading.RLock() # Used to synchronize event bus function calls\n self._outgoing_event_filters = {}\n self._reactors = []\n\n def init(self, __salt__, __opts__, hooks=[], workers=[], reactors=[]):\n \"\"\"\n Initialize this instance.\n \"\"\"\n\n # Dedicated event bus handle for receiving events\n self._incoming_bus = salt.utils.event.get_event(\"minion\",\n opts=__opts__,\n transport=__opts__[\"transport\"],\n listen=True)\n\n # Dedicated event bus handle for sending events\n 
self._outgoing_bus = salt.utils.event.get_event(\"minion\",\n opts=__opts__,\n transport=__opts__[\"transport\"],\n listen=False)\n\n # Register matcher for event processor\n self.register_event_matcher(\n self._tag_regex.pattern,\n self.process_event,\n match_type=\"regex\")\n\n # Add given workflow hooks\n for hook in hooks or []:\n try:\n if not \".\" in hook[\"func\"]:\n hook_func = self._hook_funcs[hook[\"func\"]]\n\n if \"args\" in hook or \"kwargs\" in hook:\n\n @force_kwargs(_hook=hook, _hook_func=hook_func)\n def hook_wrapper(*args, **kwargs):\n hook = kwargs.pop(\"_hook\")\n hook_func = kwargs.pop(\"_hook_func\")\n\n args = list(args) + hook.get(\"args\", [])\n kwargs.update(hook.get(\"kwargs\", {}))\n\n return hook_func(*args, **kwargs)\n\n hook_func = hook_wrapper\n\n self.add_hook(hook[\"name\"], hook[\"kind\"], hook_func, synchronize=hook.get(\"lock\", False))\n else:\n\n # Special handling of returners\n if hook[\"kind\"] == \"returner\":\n returners = cached_loader(__salt__, __opts__, \"returners\", context=self._context)\n returner_func = returners[hook[\"func\"]]\n\n # Wrap returner function to add defined args and kwargs\n def returner_returner_wrapper(message, result, hook=hook, returner_func=returner_func):\n\n # Skip empty results\n if not result:\n return\n\n args = hook.get(\"args\", [])\n kwargs = hook.get(\"kwargs\", {})\n\n # Automatically set namespace as kind for data results\n if not args and returner_func.__name__ == \"returner_data\":\n args.append(self._namespace)\n\n return returner_func(result, *args, **kwargs)\n\n self.add_hook(hook[\"name\"], hook[\"kind\"], returner_returner_wrapper, synchronize=hook.get(\"lock\", False))\n\n else:\n modules = cached_loader(__salt__, __opts__, \"modules\", context=self._context)\n hook_func = modules[hook[\"func\"]]\n\n if \"args\" in hook or \"kwargs\" in hook:\n\n @force_kwargs(_hook=hook, _hook_func=hook_func)\n def hook_wrapper(*args, **kwargs):\n hook = kwargs.pop(\"_hook\")\n hook_func = kwargs.pop(\"_hook_func\")\n\n args = list(args) + hook.get(\"args\", [])\n kwargs.update(hook.get(\"kwargs\", {}))\n\n return hook_func(*args, **kwargs)\n\n hook_func = hook_wrapper\n\n self.add_hook(hook[\"name\"], hook[\"kind\"], hook_func, synchronize=hook.get(\"lock\", False))\n\n except Exception:\n log.exception(\"Failed to add hook: {:}\".format(hook))\n\n # Add given workers\n for worker in workers or []:\n messages = worker.pop(\"messages\")\n\n self.dedicated_worker(None, start=False, **worker)\n\n # Enqueue all messages to worker\n for message in messages:\n self.dedicated_worker(message, enqueue=worker[\"name\"])\n\n # Add given reactors\n for reactor in reactors or []:\n\n # Define function to handle events when matched\n def on_event(event, match=None, reactor=reactor):\n\n # Check if conditions is defined\n conditions = reactor.get(\"conditions\", [])\n if \"condition\" in reactor:\n conditions.append(reactor[\"condition\"])\n for index, condition in enumerate(conditions, 1):\n if keyword_resolve(condition, keywords={\"event\": event, \"match\": match, \"context\": self._context, \"salt\": __salt__, \"options\": __opts__}):\n log.info(\"Event meets condition #{:} '{:}': {:}\".format(index, condition, event))\n else:\n if DEBUG:\n log.debug(\"Event NOT meets condition #{:} '{:}': {:}\".format(index, condition, event))\n\n return\n\n # Process all action messages\n actions = reactor.get(\"actions\", [])\n if \"action\" in reactor:\n actions.append(reactor[\"action\"])\n for index, message in enumerate(actions, 
1):\n\n # Check if keyword resolving is enabled\n if reactor.get(\"keyword_resolve\", False):\n resolved_message = keyword_resolve(copy.deepcopy(message), keywords={\"event\": event, \"match\": match, \"context\": self._context, \"salt\": __salt__, \"options\": __opts__})\n if DEBUG:\n log.debug(\"Keyword resolved message: {:}\".format(resolved_message))\n\n # TODO: Figure out if we can improve performance by processing each message in a dedicated worker thread or process?\n\n res = self.process(resolved_message)\n else:\n res = self.process(message)\n\n if index < len(actions) and reactor.get(\"chain_conditionally\", False):\n if not res or isinstance(res, dict) and not res.get(\"result\", True):\n if DEBUG:\n log.debug(\"Breaking action chain after message #{:} '{:}' because of result '{:}'\".format(index, message, res))\n\n break\n\n match_type = None\n if \"regex\" in reactor:\n match_type = \"regex\"\n elif \"startswith\" in reactor:\n match_type = \"startswith\"\n elif \"endswith\" in reactor:\n match_type = \"endswith\"\n elif \"fnmatch\" in reactor:\n match_type = \"fnmatch\"\n else:\n log.error(\"No valid match type found for reactor: {:}\".format(reactor))\n\n continue # Skip reactor\n\n # Register event matcher using above function\n if match_type:\n self.register_reactor(reactor, on_event, match_type=match_type)\n\n # Match init event\n try:\n self._match_event({\"tag\": \"_init\", \"data\": {}})\n except Exception:\n log.exception(\"Failed to match init event\")\n\n def _custom_match_tag_regex(self, event_tag, search_tag):\n return self._incoming_bus.cache_regex.get(search_tag).search(event_tag)\n\n def _match_event(self, event):\n for matcher in self._event_matchers:\n match = matcher[\"match_func\"](event[\"tag\"], matcher[\"tag\"])\n if not match:\n continue\n\n if DEBUG:\n log.debug(\"Matched event: %s\", repr(event))\n\n matcher[\"func\"](event, match=match)\n\n def register_reactor(self, reactor, func, match_type=\"startswith\"):\n \"\"\"\n Register a reactor and add it as an event matcher; a thin wrapper around register_event_matcher.\n \"\"\"\n\n self._reactors.append(reactor)\n self.register_event_matcher(reactor[match_type], func, match_type=match_type)\n\n def register_event_matcher(self, tag, func, match_type=\"startswith\"):\n \"\"\"\n Register additional event matchers to catch other events.\n \"\"\"\n\n em = {\n \"tag\": tag,\n \"match_type\": match_type,\n \"match_func\": self._custom_match_tag_regex if match_type == \"regex\" else self._incoming_bus._get_match_func(match_type),\n \"func\": func,\n }\n self._event_matchers.append(em)\n\n def manage_workflow(self, message):\n \"\"\"\n Administration workflow to query and manage this processor instance.\n\n Supported commands:\n - context [key]... [value=<value>]\n - context [key]... 
[default=<value>]\n - reactor list|show <name>\n \"\"\"\n\n args = message.get(\"args\", [])\n kwargs = message.get(\"kwargs\", {})\n\n if len(args) > 0 and args[0] == \"context\":\n res = self._context\n\n if len(args) > 1:\n for is_last, key in last_iter(args[1:]):\n if \"value\" in kwargs:\n if is_last:\n res[key] = kwargs[\"value\"]\n res = res[key]\n else:\n res = res.setdefault(key, {})\n else:\n if \"default\" not in kwargs:\n res = res[key]\n else:\n res = res.get(key, kwargs[\"default\"])\n\n res = ensure_primitive(res)\n if isinstance(res, dict):\n return res\n elif isinstance(res, (list, set, tuple)):\n return {\"values\": res}\n else:\n return {\"value\": res}\n elif len(args) > 1 and args[0] == \"reactor\":\n if args[1] == \"list\":\n return {\n \"values\": [r[\"name\"] for r in self._reactors],\n }\n elif args[1] == \"show\":\n if len(args) > 2 and args[2] != \"*\":\n reactors = [r for r in self._reactors if args[2] in r[\"name\"]]\n else:\n reactors = [r for r in self._reactors]\n\n return {\n \"value\": {r[\"name\"]: r for r in reactors}\n }\n else:\n return super(EventDrivenMessageProcessor, self).manage_workflow(message)\n\n def run(self):\n \"\"\"\n Blocking method that processes all received events.\n \"\"\"\n\n # Ensure all worker threads with auto start enabled are started\n threads = self.worker_threads.do_for_all(lambda t: t.auto_start, lambda t: t.start())\n if threads:\n log.info(\"Starting {:d} worker thread(s): {:s}\".format(len(threads), \", \".join([t.name for t in threads])))\n\n # Listen for incoming messages\n if DEBUG:\n log.debug(\"Listening for incoming events using %d registered event matcher(s)\", len(self._event_matchers))\n\n try:\n for event in self._incoming_bus.iter_events(full=True, auto_reconnect=True):\n if not event:\n log.warn(\"Skipping empty event\")\n continue\n\n try:\n self._match_event(event)\n except Exception:\n log.exception(\"Failed to process received event: {:}\".format(event))\n finally:\n\n # Ensure all worker threads are killed\n threads = self.worker_threads.do_for_all_by(\"*\", lambda t: t.kill(), force_wildcard=True)\n if threads:\n log.info(\"Killing all worker thread(s): {:s}\".format(\", \".join([t.name for t in threads])))\n\n def process_event(self, event, **kwargs):\n \"\"\"\n Process a received event.\n \"\"\"\n\n res = None\n\n try:\n\n # Extract message from event\n message = event[\"data\"]\n\n # Add reference to original event tag\n # (used to get correlation id when/if sending back reply)\n message[\"_event_tag\"] = event[\"tag\"]\n\n # Process message\n res = self.process(message)\n\n except Exception as ex:\n log.exception(\"Exception while processing event: {:}\".format(event))\n\n res = {\n \"error\": str(ex)\n }\n finally:\n\n # Send back reply event\n if res != None:\n self.send_reply_event_for(message, res)\n else:\n log.warn(\"No reply to send back for event: {:}\".format(event))\n\n def trigger_event(self, data, tag, skip_duplicates_filter=None):\n \"\"\"\n Trigger an outgoing event.\n \"\"\"\n\n # Check for duplicates to skip\n if skip_duplicates_filter != None:\n skip_duplicates_filter = \"dupl:{:}\".format(skip_duplicates_filter)\n if (tag, data) == self._outgoing_event_filters.get(skip_duplicates_filter, None):\n if DEBUG:\n log.debug(\"Skipping duplicate event with tag '{:s}': {:}\".format(tag, data))\n\n return\n\n log.info(\"Triggering event '{:s}': {:}\".format(tag, data))\n\n with self._bus_lock: # Synchronize just to be safe\n self._outgoing_bus.fire_event(data.copy(), tag)\n\n # Register last event for 
duplicate filter\n if skip_duplicates_filter != None:\n self._outgoing_event_filters[skip_duplicates_filter] = (tag, data)\n\n def subscribe_to_events(self, tag, match_type=\"startswith\"):\n \"\"\"\n Decorator to let a function subscribe to events matching specified tag pattern.\n \"\"\"\n\n def decorator(func):\n self._event_matchers.append({\n \"tag\": tag,\n \"match_type\": match_type,\n \"match_func\": None,\n \"func\": func,\n })\n\n return func\n\n return decorator\n\n def send_reply_event_for(self, message, data):\n \"\"\"\n Send back reply data for a received message.\n \"\"\"\n\n # Extract correlation id from original event\n match = self._tag_regex.match(message[\"_event_tag\"])\n groups = match.groupdict()\n tag = \"{:s}/res/{:s}\".format(self._namespace, groups[\"id\"])\n\n if DEBUG:\n log.debug(\"Sending reply mesage with tag '{:s}': {:}\".format(tag, data))\n\n # Send reply event\n with self._bus_lock: # Synchronize just to be safe\n self._outgoing_bus.fire_event(data, tag)\n\n #region Built-in hooks\n\n def reply_returner(self, message, result):\n self.send_reply_event_for(message, result)\n\n #endregion\n\n\nclass EventDrivenMessageClient(object):\n\n def __init__(self, namespace, default_timeout=30):\n self._namespace = namespace\n self._default_timeout = default_timeout\n\n def init(self, opts):\n self._opts = opts\n\n def send_sync(self, message, timeout=None):\n\n if timeout == None:\n timeout = message.get(\"timeout\", self._default_timeout)\n\n correlation_id = uuid.uuid4()\n \n req_tag = \"{:s}/req/{:s}\".format(self._namespace, correlation_id)\n res_tag = \"{:s}/res/{:s}\".format(self._namespace, correlation_id)\n\n bus = salt.utils.event.get_event(\"minion\",\n opts=self._opts,\n transport=self._opts[\"transport\"],\n listen=True)\n\n try:\n bus.subscribe(tag=res_tag, match_type=\"startswith\")\n\n if DEBUG:\n log.debug(\"Sending request message with tag '%s': %s\", req_tag, message)\n\n bus.fire_event(message, req_tag)\n\n reply = self._recv_reply(bus, timeout=timeout, tag=res_tag, match_type=\"startswith\")\n\n return reply\n\n finally:\n try:\n bus.destroy()\n except:\n log.exception(\"Unable to destroy event bus\")\n\n def _recv_reply(self, bus, timeout=None, **kwargs):\n\n # Determine timeout\n timeout = timeout or self._default_timeout\n\n # Wait for message until timeout\n message = bus.get_event(wait=timeout, **kwargs)\n if not message:\n log.warn(\"No reply message with tag '%s' received within timeout of %d secs\", kwargs.get(\"tag\", None), timeout)\n\n raise salt.exceptions.CommandExecutionError(\n \"No reply message received within timeout of {:d} secs - please try again and maybe increase timeout value\".format(timeout))\n\n # Check for error\n if \"error\" in message:\n if isinstance(message[\"error\"], dict):\n raise SuperiorCommandExecutionError(str(message[\"error\"]), data=message[\"error\"])\n raise salt.exceptions.CommandExecutionError(message[\"error\"])\n\n return message\n\n\ndef msg_pack(*args, **kwargs):\n \"\"\"\n Helper method to pack message into dict.\n \"\"\"\n\n msg = {}\n if args:\n msg[\"args\"] = args\n if kwargs:\n for k, v in kwargs.iteritems():\n if k.startswith(\"__\"): # Filter out Salt params (__pub_*)\n continue\n\n if k.startswith(\"_\"):\n msg[k.lstrip(\"_\")] = v\n else:\n if not \"kwargs\" in msg:\n msg[\"kwargs\"] = {}\n\n msg[\"kwargs\"][k] = v\n\n return msg\n\n\ndef keyword_resolve(data, keywords={}, symbol=\"$\"):\n \"\"\"\n Helper method to resolve keywords in a data structure.\n \"\"\"\n\n if isinstance(data, 
(list, tuple, set)):\n for idx, val in enumerate(data):\n data[idx] = keyword_resolve(val, keywords)\n\n if isinstance(data, dict):\n res = {}\n for key, val in data.iteritems():\n res[keyword_resolve(key, keywords)] = keyword_resolve(val, keywords)\n data = res\n\n elif isinstance(data, basestring) and symbol in data:\n\n # Replace keywords in data\n for key in keywords:\n data = data.replace(\"{:s}{:s}\".format(symbol, key), \"__{:s}__\".format(key))\n\n return eval(data, {\"__{:s}__\".format(key): val for key, val in keywords.iteritems()})\n\n return data\n\n\ndef extract_error_from(result):\n \"\"\"\n Helper function to extract error from a result.\n \"\"\"\n\n if not result:\n log.error(\"Cannot attempt to extract error from an empty result: {:}\".format(result))\n\n return\n\n return result if isinstance(result, Exception) else result.get(\"error\", None) if isinstance(result, dict) else result\n\n\ndef filter_out_unchanged(result, context={}, kind=None):\n \"\"\"\n Helper function to filter out unchanged results recursively based on their specified types.\n \"\"\"\n\n # Build qualified type string for the result\n kind = \".\".join(filter(None, [kind, result.get(\"_type\", None)]))\n\n # Loop through all keys in the result and build entry with the significant alternating values\n entry = {}\n for key, val in result.iteritems():\n\n # Skip all meta/hidden\n if key.startswith(\"_\"):\n continue\n\n # Dive into list in an attempt to filter it\n if isinstance(val, list):\n\n vals = []\n for res in val:\n\n # Recursive handling of dictionary values\n if isinstance(res, dict):\n sub_res = filter_out_unchanged(res, context=context, kind=kind)\n if sub_res:\n vals.append(sub_res)\n\n # Special handling of primitive values - they are always added\n else:\n vals.append(res)\n\n # Ensure primitive values are also added to entry\n entry[key] = vals\n\n # Set filtered values on result\n result[key] = vals\n\n # Ordinary primitive or dictionary value\n else:\n entry[key] = val\n\n # Do we have a type and an entry with one or more significant alternating values?\n if kind and entry:\n\n # Compare if entry equals content cached in context\n if context.get(kind, None) == entry:\n\n # Skip entry when equal to cached\n return\n\n # Otherwise update cache with recent content\n else:\n context[kind] = entry\n\n return result\n\n","repo_name":"autopi-io/autopi-core","sub_path":"src/salt/base/ext/_utils/messaging.py","file_name":"messaging.py","file_ext":"py","file_size_in_byte":41942,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"21"} +{"seq_id":"42217497924","text":"import pygame\nfrom core.levels.level1 import Level1\nfrom core.sprites import Event_block, LevelSubArea, Player, PlayerVisual\nfrom core.states.battlestate import BattleState\nfrom core.states.statemachine import GameState\nfrom core.colors import *\nfrom core.files import *\n\nclass Controls(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.up = False\n self.down = False\n self.left = False\n self.right = False\n \n def gety(self):\n if self.up and self.down:\n return 0\n\n if self.up:\n return -1\n\n if self.down:\n return 1\n \n return 0\n\n def getx(self):\n if self.left and self.right:\n return 0\n\n if self.left:\n return -1\n\n if self.right:\n return 1\n \n return 0\n\nclass MainState(GameState):\n def __init__(self, screen):\n super(MainState, self).__init__()\n start_location = (17, 6)\n self.player = Player((tile_size*(start_location[0] - 
1)),(tile_size*(start_location[1] - 1)))\n self.player_visual = PlayerVisual(player_walk, player_run)\n self.player_visual.set_position( screen_width/2, (screen_height/2)-6)\n self.subareas: list[LevelSubArea] = []\n self.current_level = Level1(self.player, screen)\n\n litteroot_town = LevelSubArea(\"Litteroot Town\", transparent_alpha, pygame.mixer.Channel(1), (tile_size*0,tile_size*12,tile_size*60,tile_size*40))\n litteroot_town.set_music(\"music/litteroot_intro.ogg\",\"music/litteroot_loop.ogg\")\n self.current_level.add_sprites(\"location\", [litteroot_town])\n self.subareas.append(litteroot_town)\n\n route_101 = LevelSubArea(\"Route 101\", transparent_alpha, pygame.mixer.Channel(2), (0,0,tile_size*60,tile_size*12))\n route_101.set_music(\"music/Route_101_intro.ogg\",\"music/Route_101_loop.ogg\")\n self.current_level.add_sprites(\"location\", [route_101])\n self.subareas.append(route_101)\n \n self.player.level = self.current_level\n self.screen = screen\n self.slow = 1\n self.fast = 3\n self.active_object_list = pygame.sprite.Group()\n self.active_object_list.add(self.player, self.player_visual)\n self.controls = Controls()\n\n def onEnable(self):\n print(\"Main State was enabled!\")\n self.make_player_walk()\n self.controls.reset()\n self.player_visual.change_direction(0, 0)\n\n def onDisable(self):\n print(\"Main State was disabled!\")\n\n def make_player_run(self):\n self.player_visual.run = True\n self.player_visual.fps = self.player_visual.fast_fps\n self.player.change_speed(self.fast)\n\n def make_player_walk(self):\n self.player_visual.run = False\n self.player_visual.fps = self.player_visual.slow_fps\n self.player.change_speed(self.slow)\n\n def switch_to_battle(self):\n Event_block.switch_mode = False\n self.controls.reset()\n pygame.image.save(self.screen, \"images/screenshot.png\")\n self.activateNewState(BattleState(self.screen))\n\n for subarea in self.subareas:\n subarea.disable()\n\n print(\"disabled\")\n\n def onUpdate(self, deltaTime, events):\n if Event_block.switch_mode:\n self.switch_to_battle()\n return\n\n for event in events:\n if (event.type == pygame.KEYDOWN):\n if(event.key == pygame.K_q):\n self.make_player_run()\n if(event.key == pygame.K_e):\n self.make_player_walk()\n if (event.type == pygame.KEYDOWN):\n k = event.key\n if(k == pygame.K_LEFT or k == pygame.K_a):\n for x in range(0,(len(self.current_level.queue_list.sprites()))):\n (self.current_level.queue_list.sprites())[x].entity_collision(self.player)\n self.controls.left = True\n elif(k == pygame.K_RIGHT or k == pygame.K_d):\n for x in range(0,(len(self.current_level.queue_list.sprites()))):\n (self.current_level.queue_list.sprites())[x].entity_collision(self.player)\n self.controls.right = True\n elif(k == pygame.K_UP or k == pygame.K_w):\n for x in range(0,(len(self.current_level.queue_list.sprites()))):\n (self.current_level.queue_list.sprites())[x].entity_collision(self.player)\n self.controls.up = True\n elif(k == pygame.K_DOWN or k == pygame.K_s):\n for x in range(0,(len(self.current_level.queue_list.sprites()))):\n (self.current_level.queue_list.sprites())[x].entity_collision(self.player)\n self.controls.down = True\n elif (event.type == pygame.KEYUP):\n k = event.key\n if(k == pygame.K_LEFT or k == pygame.K_a):\n self.controls.left = False\n elif(k == pygame.K_RIGHT or k == pygame.K_d):\n self.controls.right = False\n elif(k == pygame.K_UP or k == pygame.K_w):\n self.controls.up = False\n elif(k == pygame.K_DOWN or k == pygame.K_s):\n self.controls.down = False\n\n for subarea in 
self.subareas:\n subarea.entity_collision(self.player)\n\n self.player.direction[0] = self.controls.getx();\n self.player.direction[1] = self.controls.gety();\n self.player.update(self.current_level.collidable_list, deltaTime)\n self.player_visual.change_direction(self.player.direction[0], self.player.direction[1])\n self.player_visual.update()\n\n self.current_level.update()\n self.current_level.scroll()\n self.current_level.draw(self.screen)\n self.active_object_list.draw(self.screen)\n\n","repo_name":"knnth3/Pythamon","sub_path":"Pokemon_Emerald/core/states/mainstate.py","file_name":"mainstate.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"815662111","text":"import cmath\nimport math\nimport numpy as np\n\n\ndef convertToPolar(complexNumber):\n module, angleRad = cmath.polar(complexNumber)\n angle = math.degrees(angleRad)\n return module, angle\n\n\ndef getMatrices(Ecuation1, Ecuation2, Ecuation3):\n Principal_Matriz = np.array([[Ecuation1[\"Intensity_1_1\"], Ecuation1[\"Intensity_2_1\"], Ecuation1[\"Intensity_3_1\"]],\n [Ecuation2[\"Intensity_1_2\"], Ecuation2[\"Intensity_2_2\"], Ecuation2[\"Intensity_3_2\"]],\n [Ecuation3[\"Intensity_1_3\"], Ecuation3[\"Intensity_2_3\"], Ecuation3[\"Intensity_3_3\"]]])\n Matriz_Intensiad_1 = np.array([[Ecuation1[\"Voltaje_1\"], Ecuation1[\"Intensity_2_1\"], Ecuation1[\"Intensity_3_1\"]],\n [Ecuation2[\"Voltaje_2\"], Ecuation2[\"Intensity_2_2\"], Ecuation2[\"Intensity_3_2\"]],\n [Ecuation3[\"Voltaje_3\"], Ecuation3[\"Intensity_2_3\"], Ecuation3[\"Intensity_3_3\"]]])\n Matriz_Intensiad_2 = np.array([[Ecuation1[\"Intensity_1_1\"], Ecuation1[\"Voltaje_1\"], Ecuation1[\"Intensity_3_1\"]],\n [Ecuation2[\"Intensity_1_2\"], Ecuation2[\"Voltaje_2\"], Ecuation2[\"Intensity_3_2\"]],\n [Ecuation3[\"Intensity_1_3\"], Ecuation3[\"Voltaje_3\"], Ecuation3[\"Intensity_3_3\"]]])\n Matriz_Intensiad_3 = np.array([[Ecuation1[\"Intensity_1_1\"], Ecuation1[\"Intensity_2_1\"], Ecuation1[\"Voltaje_1\"]],\n [Ecuation2[\"Intensity_1_2\"], Ecuation2[\"Intensity_2_2\"], Ecuation2[\"Voltaje_2\"]],\n [Ecuation3[\"Intensity_1_3\"], Ecuation3[\"Intensity_2_3\"], Ecuation3[\"Voltaje_3\"]]])\n return Principal_Matriz, Matriz_Intensiad_1, Matriz_Intensiad_2, Matriz_Intensiad_3\n","repo_name":"RicardoGH29/Calc3_3matrizElectrica","sub_path":"fuctions.py","file_name":"fuctions.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18462207369","text":"import scipy.io\r\nimport csv\r\nimport numpy as np\r\nfrom pathlib import Path\r\nimport matplotlib.pyplot as plt\r\nfrom delay_functions import calculate_delays\r\nfrom location_functions import calculate_locations, get_locations\r\n\r\ndef get_matfile(name):\r\n\tdirname = Path(__file__).parent.parent\r\n\treturn (dirname / name).resolve()\r\n\r\ndef export_to_csv(data, name):\r\n file_name = '../csv_files/' + name + '.csv'\r\n with open(file_name, mode='w', newline='') as data_file:\r\n data_write = csv.writer(data_file, delimiter=';', quotechar='\"', quoting=csv.QUOTE_MINIMAL) \r\n data_write.writerow(['x','y'])\r\n for i in range(data.size):\r\n data_write.writerow([data[i][0].round(2), data[i][1].round(2)])\r\n \r\ndef print_locations(name, locations):\r\n print('Locaties van ' + name)\r\n for i in range(locations.size):\r\n text = ' locatie ' + str(i+1) + ':\\n x = ' + str(locations[i][0]) + '\\n y = ' + str(locations[i][1])\r\n 
print(text)\r\n if i == locations.size-1: print('')\r\n \r\ndef plot_path(x_array, y_array, marker, color, title):\r\n plt.figure(figsize=(8, 8))\r\n plt.grid(b=True)\r\n plt.xlim(left=-1)\r\n plt.xlim(right=7)\r\n plt.ylim(bottom=0)\r\n plt.ylim(top=5)\r\n plt.xlabel('plaats x (m)')\r\n plt.ylabel('plaats y (m)')\r\n plt.title(label=title)\r\n plt.scatter(0,1, marker=\"o\", color='g', label='station')\r\n plt.plot(x_array, y_array, marker=marker, color=color, label='baan drone')\r\n plt.legend()\r\n\r\nmat1 = scipy.io.loadmat(get_matfile(\"Dataset_1.mat\"))\r\nmat2 = scipy.io.loadmat(get_matfile(\"Dataset_2.mat\"))\r\n\r\ndelays1_a = calculate_delays(mat1.get('H'), venster_on=1, title='Dataset 1 met venster') # dataset 1 with window\r\ndelays1_b = calculate_delays(mat1.get('H'), venster_on=0, title='Dataset 1 zonder venster') # dataset 1 without window\r\ndelays2_a = calculate_delays(mat2.get('H'), venster_on=1, title='Dataset 2 met venster') # dataset 2 with window\r\ndelays2_b = calculate_delays(mat2.get('H'), venster_on=0, title='Dataset 2 zonder venster') # dataset 2 without window\r\n\r\nloc_array = get_locations()\r\nloc_x_array = np.asarray([None] * 24)\r\nloc_y_array = np.asarray([None] * 24)\r\nfor i in range(loc_array.size):\r\n loc_x_array[i] = loc_array[i][0]\r\n loc_y_array[i] = loc_array[i][1]\r\n\r\nlocations1_a = calculate_locations(delays1_a)\r\nlocations1_a_x = np.asarray([None] * 24)\r\nlocations1_a_y = np.asarray([None] * 24)\r\nfor i in range(locations1_a.size):\r\n locations1_a_x[i] = locations1_a[i][0]\r\n locations1_a_y[i] = locations1_a[i][1]\r\n\r\nlocations2_a = calculate_locations(delays2_a)\r\nlocations2_a_x = np.asarray([None] * 24)\r\nlocations2_a_y = np.asarray([None] * 24)\r\nfor i in range(locations2_a.size):\r\n locations2_a_x[i] = locations2_a[i][0]\r\n locations2_a_y[i] = locations2_a[i][1]\r\n \r\nlocations1_b = calculate_locations(delays1_b)\r\nlocations1_b_x = np.asarray([None] * 24)\r\nlocations1_b_y = np.asarray([None] * 24)\r\nfor i in range(locations1_b.size):\r\n locations1_b_x[i] = abs(locations1_b[i][0]) # Take the absolute value here because some values are complex, which causes problems\r\n locations1_b_y[i] = abs(locations1_b[i][1])\r\n\r\nlocations2_b = calculate_locations(delays2_b)\r\nlocations2_b_x = np.asarray([None] * 24)\r\nlocations2_b_y = np.asarray([None] * 24)\r\nfor i in range(locations2_b.size):\r\n locations2_b_x[i] = abs(locations2_b[i][0]) # Take the absolute value here because some values are complex, which causes problems\r\n locations2_b_y[i] = abs(locations2_b[i][1])\r\n\r\nerrors1_a = abs(locations1_a - loc_array)\r\nerrors1_b = abs(locations1_b - loc_array)\r\nerrors2_a = abs(locations2_a - loc_array)\r\nerrors2_b = abs(locations2_b - loc_array)\r\n\r\n# =============================================================================\r\n# Create the CSV files\r\n# =============================================================================\r\nexport_to_csv(loc_array, 'original_coordinates')\r\nexport_to_csv(locations1_a, 'locations1_a_coordinates')\r\nexport_to_csv(locations1_b, 'locations1_b_coordinates')\r\nexport_to_csv(locations2_a, 'locations2_a_coordinates')\r\nexport_to_csv(locations2_b, 'locations2_b_coordinates')\r\nexport_to_csv(errors1_a, 'errors1_a')\r\nexport_to_csv(errors1_b, 'errors1_b')\r\nexport_to_csv(errors2_a, 'errors2_a')\r\nexport_to_csv(errors2_b, 'errors2_b')\r\n\r\n# =============================================================================\r\n# Print the 
locations\r\n# =============================================================================\r\nprint_locations('dataset 1', locations1_a)\r\nprint_locations('dataset 2', locations2_a)\r\n\r\n# =============================================================================\r\n# Plot the path that the drone follows\r\n# =============================================================================\r\nplot_path(loc_x_array, loc_y_array, marker='o', color='k', title='Origineel pad')\r\nplot_path(locations1_b_x, locations1_b_y, marker='s', color='r', title='Dataset 1 zonder venster')\r\nplot_path(locations1_a_x, locations1_a_y, marker='s', color='b', title='Dataset 1 met venster')\r\nplot_path(locations2_b_x, locations2_b_y, marker='D', color='r', title='Dataset 2 zonder venster')\r\nplot_path(locations2_a_x, locations2_a_y, marker='D', color='b', title='Dataset 2 met venster')\r\n","repo_name":"RobbeVer/LocatieBepaling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9918173054","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nfrom .utils import _tranpose_and_gather_feat\nimport torch.nn.functional as F\n\n\ndef _slow_neg_loss(pred, gt):\n '''focal loss from CornerNet'''\n pos_inds = gt.eq(1)\n neg_inds = gt.lt(1)\n\n neg_weights = torch.pow(1 - gt[neg_inds], 4)\n\n loss = 0\n pos_pred = pred[pos_inds]\n neg_pred = pred[neg_inds]\n\n pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)\n neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if pos_pred.nelement() == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\n\ndef _neg_loss(pred, gt):\n ''' Modified focal loss. 
Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w)\n gt_regr (batch x c x h x w)\n '''\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n neg_weights = torch.pow(1 - gt, 4)\n\n loss = 0\n\n pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\ndef _not_faster_neg_loss(pred, gt):\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float() \n num_pos = pos_inds.float().sum()\n neg_weights = torch.pow(1 - gt, 4)\n\n loss = 0\n trans_pred = pred * neg_inds + (1 - pred) * pos_inds\n weight = neg_weights * neg_inds + pos_inds\n all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight\n all_loss = all_loss.sum()\n\n if num_pos > 0:\n all_loss /= num_pos\n loss -= all_loss\n return loss\n\ndef _slow_reg_loss(regr, gt_regr, mask):\n num = mask.float().sum()\n mask = mask.unsqueeze(2).expand_as(gt_regr)\n\n regr = regr[mask]\n gt_regr = gt_regr[mask]\n \n regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)\n regr_loss = regr_loss / (num + 1e-4)\n return regr_loss\n\ndef _reg_loss(regr, gt_regr, mask):\n ''' L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n '''\n num = mask.float().sum()\n mask = mask.unsqueeze(2).expand_as(gt_regr).float()\n\n regr = regr * mask\n gt_regr = gt_regr * mask\n \n regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)\n regr_loss = regr_loss / (num + 1e-4)\n return regr_loss\n\nclass FocalLoss(nn.Module):\n '''nn.Module warpper for focal loss'''\n def __init__(self):\n super(FocalLoss, self).__init__()\n self.neg_loss = _neg_loss\n\n def forward(self, out, target):\n return self.neg_loss(out, target)\n\nclass RegLoss(nn.Module):\n '''Regression loss for an output tensor\n Arguments:\n output (batch x dim x h x w)\n mask (batch x max_objects)\n ind (batch x max_objects)\n target (batch x max_objects x dim)\n '''\n def __init__(self):\n super(RegLoss, self).__init__()\n \n def forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_feat(output, ind)\n loss = _reg_loss(pred, target, mask)\n return loss\n\nclass RegL1Loss(nn.Module):\n def __init__(self):\n super(RegL1Loss, self).__init__()\n \n def forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_feat(output, ind)\n mask = mask.unsqueeze(2).expand_as(pred).float()\n # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n loss = loss / (mask.sum() + 1e-4)\n return loss\n\nclass NormRegL1Loss(nn.Module):\n def __init__(self):\n super(NormRegL1Loss, self).__init__()\n \n def forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_feat(output, ind)\n mask = mask.unsqueeze(2).expand_as(pred).float()\n # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n pred = pred / (target + 1e-4)\n target = target * 0 + 1\n loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n loss = loss / (mask.sum() + 1e-4)\n return loss\n\nclass RegWeightedL1Loss(nn.Module):\n def __init__(self):\n super(RegWeightedL1Loss, self).__init__()\n \n def 
forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_feat(output, ind)\n mask = mask.float()\n # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n loss = loss / (mask.sum() + 1e-4)\n return loss\n\nclass L1Loss(nn.Module):\n def __init__(self):\n super(L1Loss, self).__init__()\n \n def forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_feat(output, ind)\n mask = mask.unsqueeze(2).expand_as(pred).float()\n loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n return loss\n\nclass BinRotLoss(nn.Module):\n def __init__(self):\n super(BinRotLoss, self).__init__()\n \n def forward(self, output, mask, ind, rotbin, rotres):\n pred = _tranpose_and_gather_feat(output, ind)\n loss = compute_rot_loss(pred, rotbin, rotres, mask)\n return loss\n\ndef compute_res_loss(output, target):\n return F.smooth_l1_loss(output, target, reduction='elementwise_mean')\n\n# TODO: weight\ndef compute_bin_loss(output, target, mask):\n mask = mask.expand_as(output)\n output = output * mask.float()\n return F.cross_entropy(output, target, reduction='elementwise_mean')\n\ndef compute_rot_loss(output, target_bin, target_res, mask):\n # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, \n # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]\n # target_bin: (B, 128, 2) [bin1_cls, bin2_cls]\n # target_res: (B, 128, 2) [bin1_res, bin2_res]\n # mask: (B, 128, 1)\n # import pdb; pdb.set_trace()\n output = output.view(-1, 8)\n target_bin = target_bin.view(-1, 2)\n target_res = target_res.view(-1, 2)\n mask = mask.view(-1, 1)\n loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)\n loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)\n loss_res = torch.zeros_like(loss_bin1)\n if target_bin[:, 0].nonzero().shape[0] > 0:\n idx1 = target_bin[:, 0].nonzero()[:, 0]\n valid_output1 = torch.index_select(output, 0, idx1.long())\n valid_target_res1 = torch.index_select(target_res, 0, idx1.long())\n loss_sin1 = compute_res_loss(\n valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))\n loss_cos1 = compute_res_loss(\n valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))\n loss_res += loss_sin1 + loss_cos1\n if target_bin[:, 1].nonzero().shape[0] > 0:\n idx2 = target_bin[:, 1].nonzero()[:, 0]\n valid_output2 = torch.index_select(output, 0, idx2.long())\n valid_target_res2 = torch.index_select(target_res, 0, idx2.long())\n loss_sin2 = compute_res_loss(\n valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))\n loss_cos2 = compute_res_loss(\n valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))\n loss_res += loss_sin2 + loss_cos2\n return loss_bin1 + loss_bin2 + loss_res\n\n\nclass TripletLoss(nn.Module):\n \"\"\"Triplet loss with hard positive/negative mining.\n Reference:\n Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.\n Args:\n margin (float): margin for triplet.\n \"\"\"\n\n def __init__(self, margin=0.3, mutual_flag=False):\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n self.mutual = mutual_flag\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: feature matrix with shape (batch_size, feat_dim)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n n = inputs.size(0)\n # inputs = 1. 
* inputs / (torch.norm(inputs, 2, dim=-1, keepdim=True).expand_as(inputs) + 1e-12)\n # Compute pairwise distance, replace by the official when merged\n dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)\n dist = dist + dist.t()\n dist.addmm_(1, -2, inputs, inputs.t())\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n # For each anchor, find the hardest positive and negative\n mask = targets.expand(n, n).eq(targets.expand(n, n).t())\n dist_ap, dist_an = [], []\n for i in range(n):\n dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))\n dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))\n dist_ap = torch.cat(dist_ap)\n dist_an = torch.cat(dist_an)\n # Compute ranking hinge loss\n y = torch.ones_like(dist_an)\n loss = self.ranking_loss(dist_an, dist_ap, y)\n if self.mutual:\n return loss, dist\n return loss\n","repo_name":"microsoft/computervision-recipes","sub_path":"utils_cv/tracking/references/fairmot/models/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":9322,"program_lang":"python","lang":"en","doc_type":"code","stars":9169,"dataset":"github-code","pt":"21"} +{"seq_id":"15961726029","text":"# medium filter algorithm\n\n# import pathlib to test if file exists\nfrom pathlib import Path\n# PIL module to import and export images\nfrom PIL import Image\n# numpy to use efficienct arrays\nimport numpy as np\n# sys to determine arg user input information\nimport sys\n# opencv for the use of median filters\nimport cv2\n\n\ndef user_input():\n '''\n accepts user input for median filter\n including file names and size of filter\n '''\n # test if the proper number of inputs from user\n if (len(sys.argv)) > 2:\n # get file in directory\n filepath = sys.argv[1]\n # get size of median filter\n size_str = sys.argv[2]\n # get file in directory\n img = Path(filepath)\n # determines if file exists in directory\n if img.is_file():\n # splits filepath by period\n t = filepath.split('.')\n # generates filename and extension\n name,ext = '.'.join(t[:-1]),'.'+t[-1]\n # try expression to transform integer\n try:\n # transform size from string to int\n size = int(size_str)\n # if cannot be converted, raises ValueError\n except ValueError:\n # output message for imporper size input\n print('Second arg must be integer')\n # exit program if improper input\n sys.exit(1)\n # condition if try worked successfully\n else:\n # test if filter is odd\n if size % 2 == 0:\n # if not odd, output to user\n print('Filter size must be odd')\n # exit program if not odd\n sys.exit(1)\n # tests if size is too small\n if size < 3:\n # output error message if filter too small\n print('Filter must be 3 or greater')\n # exit program if filter too small\n sys.exit(1)\n # return filename and extension\n return name,ext,size\n # condition if file not found\n else:\n # tell user file not found\n print('File',filepath,'not found')\n # exit program gracefully if no file\n sys.exit(1)\n # condition if no user args\n else:\n # print to user that default image being used\n print('Using default image')\n # return default image filepath and args\n return 'img/valve','.png',3\n\n\ndef init_img(filepath):\n '''\n initializes image\n '''\n # creates image from PIL module as np array\n # and converts it to black and white image\n return np.array(Image.open(filepath).convert('L'))\n\n\ndef save_img(filepath,pixels):\n '''\n outputs the image to same dir as input\n with additional info in file name\n '''\n # splits the image by period\n t = filepath.split('.')\n # appends file to contain 
algorithm of transformation\n filepath = '.'.join(t[:-1])+'_median.'+t[-1]\n # creates PIL image from numpy array\n img = Image.fromarray(pixels)\n # function to save image to directory\n img.save(filepath)\n\n\ndef median(a,n):\n '''\n get median in series of numbers\n '''\n # sort numbers in incrementing order\n a.sort()\n # return median value of sorted array\n return a[n]\n\n\n# good median blur\ndef apply_median_filter(img,dim):\n '''\n blurs the image using the median filter\n '''\n # gets offset of median filter\n off = int(dim/2)\n # makes copy of the image to be manipulated\n fltrd = img.copy()\n # generage the values for the median filter\n w = np.zeros(dim*dim,dtype=np.uint8)\n # generates value for middle element for filter\n n = int((dim*dim)/2+1)\n # iterate through each row of image\n # up through half of filter\n for i in range(off,len(img)-off):\n # iterate through column of each row\n for j in range(off,len(img[i])-off):\n # iterate through rows of filter\n for s in range(i-off,i+off+1):\n # iterate through column of each row\n for t in range(j-off,j+off+1):\n # places each value of img into filter\n w[(s-i+off)*dim+(t-j+off)] = img[s][t]\n # place median val of filter into \n # designated index of filter\n fltrd[i][j] = median(w,n)\n # return img with median filter\n return fltrd\n\n\ndef blur_opencv(img,size):\n '''\n function to call default opencv blur\n '''\n # returns result of opencv blur function\n # of input filter size\n return cv2.blur(img,(size,size))\n\n\ndef gaussian_blur_opencv(img,size):\n '''\n function to call default opencv gaussian blur\n '''\n # returns result of opencv gaussian blur\n # the input size of filter given as tuple\n return cv2.GaussianBlur(img,(size,size),0)\n\n\ndef median_blur_opencv(img,size):\n '''\n function to call opencv median blur\n '''\n # returns result of median blur\n # based on input filter size\n return cv2.medianBlur(img,size)\n\n\ndef driver():\n '''\n driver calls the various assorted\n algorithms of the program\n and saves the images to dir\n '''\n # collect user input information\n name,ext,size = user_input()\n # initialize the image\n img = init_img(name+ext)\n # calls homemade median filter algorithm\n fltrd = apply_median_filter(img,size)\n # saves homemade median result image\n save_img(name+''+ext,fltrd)\n # calls opencv median blur\n blur_cv2 = blur_opencv(img,size)\n # saves opencv median blur\n save_img(name+'_blur_cv2'+ext,blur_cv2)\n # calls gaussian blur opencv function\n gauss_cv2 = gaussian_blur_opencv(img,size)\n # saves opencv gaussian blur function\n save_img(name+'_gaussian_blur_cv2'+ext,gauss_cv2)\n # calls median blur from opencv\n med_blur_cv2 = median_blur_opencv(img,size)\n # saves opencv median blur function\n save_img(name+'_median_blur_cv2'+ext,med_blur_cv2)\n\n \nif __name__ == '__main__':\n '''\n entry point for the program\n '''\n # calls program driver\n driver()\n\n\n# median filter\n","repo_name":"treyamador/computer_vision","sub_path":"theory/median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":6185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21057269197","text":"from pycocotools.coco import COCO # pip install pycocotools\nimport requests\nimport os\nimport sys\nimport threading\n\ndef makeDirectory(dirName):\n try:\n os.mkdir(dirName)\n print(f\"\\nMade {dirName} Directory.\\n\")\n except:\n pass\n\ndef getImagesFromClassName(className):\n makeDirectory(f'downloaded_images/{className}')\n catIds = 
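A note on the median-filter record above: for a window of dim*dim values (dim odd), the middle index of the sorted window is dim*dim // 2, while the script computes n = int((dim*dim)/2 + 1) and so returns the element one past the true median. A corrected sketch of the window median:

import numpy as np

def window_median(values):
    values = np.sort(values)
    return values[len(values) // 2]  # index 4 for a 3x3 window, not 5

assert window_median(np.array([9, 1, 5, 3, 7, 2, 8, 4, 6])) == 5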
coco.getCatIds(catNms=[className])\n imgIds = coco.getImgIds(catIds=catIds )\n images = coco.loadImgs(imgIds)\n\n print(f\"Total Images: {len(images)} for class '{className}'\")\n\n for im in images:\n image_file_name = im['file_name']\n label_file_name = im['file_name'].split('.')[0] + '.txt'\n\n fileExists = os.path.exists(f'downloaded_images/{className}/{image_file_name}')\n if(not fileExists):\n img_data = requests.get(im['coco_url']).content\n annIds = coco.getAnnIds(imgIds=im['id'], catIds=catIds, iscrowd=None)\n anns = coco.loadAnns(annIds) \n print(f\"{className}. Downloading - {image_file_name}\")\n for i in range(len(anns)):\n # Yolo Format: center-x center-y width height\n # All values are relative to the image.\n topLeftX = anns[i]['bbox'][0] / im['width']\n topLeftY = anns[i]['bbox'][1] / im['height']\n width = anns[i]['bbox'][2] / im['width']\n height = anns[i]['bbox'][3] / im['height']\n \n s = \"0 \" + str((topLeftX + (topLeftX + width)) / 2) + \" \" + \\\n str((topLeftY + (topLeftY + height)) / 2) + \" \" + \\\n str(width) + \" \" + \\\n str(height)\n \n if(i < len(anns) - 1):\n s += '\\n'\n \n with open(f'downloaded_images/{className}/{image_file_name}', 'wb') as image_handler:\n image_handler.write(img_data)\n with open(f'downloaded_images/{className}/{label_file_name}', 'w') as label_handler:\n label_handler.write(s)\n else:\n print(f\"{className}. {image_file_name} - Already Downloaded.\")\n\nargumentList = sys.argv\n\nclasses = argumentList[1:]\n\nclasses = [class_name.lower() for class_name in classes] # Converting to lower case\n\n\nif(classes[0] == \"--help\"):\n with open('classes.txt', 'r') as fp:\n lines = fp.readlines()\n print(\"**** Classes ****\\n\")\n [print(x.split('\\n')[0]) for x in lines]\n exit(0) \n\nprint(\"\\nClasses to download: \", classes, end = \"\\n\\n\")\n\nmakeDirectory('downloaded_images')\n\ncoco = COCO('instances_train2017.json')\ncats = coco.loadCats(coco.getCatIds())\nnms=[cat['name'] for cat in cats]\n\nfor name in classes:\n if(name not in nms):\n print(f\"{name} is not a valid class, Skipping.\")\n classes.remove(name)\n\nthreads = []\n\n# Creating threads for every class provided.\nfor i in range(len(classes)):\n t = threading.Thread(target=getImagesFromClassName, args=(classes[i],)) \n threads.append(t)\n \nfor t in threads:\n t.start()\n\nfor t in threads:\n t.join()\n\nprint(\"Done.\")\n","repo_name":"KaranJagtiani/YOLO-Coco-Dataset-Custom-Classes-Extractor","sub_path":"coco-extractor.py","file_name":"coco-extractor.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"21"} +{"seq_id":"18702311470","text":"#!/usr/bin/python3\n# coding=UTF-8\n\nimport os \nimport sys \n\na = [12,11,13,5,6]\n#write an algo for insertion sort\n\ndef sort_insertion(a):\n\n #since 1st elements can't have previous elements got from 1\n for i in range(1,len(a)):\n key = a[i]\n j = i-1\n while j>=0 and key < a[j]:\n a[j+1] = a[j]\n j -= 1\n a[j+1] = key\n\n return a \n\nprint(\"This is the file name\",sys.argv[0])\nprint(\"This is the input given for insertion sort\",sys.argv[1:])\ntry:\n numbers = [float(arg) for arg in sys.argv[1:]]\nexcept Exception as e:\n print(\"Wrong data type in input\",e)\n a = [4,3,2,10,12,1,5,6]\n print(\"By default we are taking this as numput\",a)\n numbers = 
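Two fixes for the coco-extractor.py record above, sketched under the record's own naming: the label string s is reassigned on every pass of the annotation loop, so only the last bounding box reaches the .txt file, and classes.remove(name) mutates the list while it is being iterated.

def yolo_label_lines(anns, img_w, img_h, class_id=0):
    # accumulate one YOLO line per annotation instead of overwriting s each pass
    lines = []
    for ann in anns:
        x, y, w, h = ann['bbox']
        cx, cy = (x + w / 2) / img_w, (y + h / 2) / img_h  # YOLO stores box centers
        lines.append(f"{class_id} {cx} {cy} {w / img_w} {h / img_h}")
    return '\n'.join(lines)

print(yolo_label_lines([{'bbox': (10, 20, 30, 40)}, {'bbox': (0, 0, 5, 5)}], 100, 100))

# drop invalid class names without mutating the list mid-iteration
nms = {'dog', 'cat'}
classes = [name for name in ['dog', 'not_a_class'] if name in nms]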
a\n\nprint(sort_insertion(numbers))\n","repo_name":"warlord-2227/DSA","sub_path":"sorting/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35957284280","text":"from cxcore.thread import Thread\nimport logging\nimport types\n\nclass log:\n\n\tdef __init__(self,bus,disconnect=False):\n\t\tself.bus = bus\n\t\tself.__disconnect = disconnect\n\n\tdef __del__(self):\n\t\ttry:\n\t\t\tif self.__disconnect:\n\t\t\t\tself.bus.put(\"dispatcher2\",{\"disconnect\": self.bus.address})\n\t\texcept:\n\t\t\tpass\n\n\tdef log(self,level,message):\n\t\t'''\n\t\tA function to ease logging via dispatcher2\n\t\t'''\n\t\tself.bus.put(\"logger\",(level,message))\n\nclass logger(Thread):\n\t'''\n\tLogger thread\n\n\tDo not use remote logging, implemented by Python `logging` module.\n\tUse internal messaging to a remote connexion instance, it will go\n\tthrough secure channel.\n\t'''\n\tlevel = {\n\t\t\"critical\":\t(logging.CRITICAL,\tlogging.critical),\n\t\t\"error\":\t(logging.ERROR,\t\tlogging.error),\n\t\t\"warning\":\t(logging.WARNING,\tlogging.warning),\n\t\t\"info\":\t\t(logging.INFO,\t\tlogging.info),\n\t\t\"debug\":\t(logging.DEBUG,\t\tlogging.debug),\n\t}\n\n\tdef __init__(self,name=\"logger\",file=None,level=\"warning\"):\n\t\tThread.__init__(self)\n\t\tself.setName(name)\n\t\tlogging.basicConfig(filename=file, level=self.level[level][0], format='%(asctime)s: (%(levelname)s) %(message)s')\n\t\tself.level[\"info\"][1](\"logger thread %s startup\" % (self.getName()))\n\n\tdef run(self):\n\t\twhile True:\n\t\t\taddr = {}\n\t\t\tmessage = self.bus.get(addr)\n\n\t\t\t# print message\n\n\t\t\tif type(message) == types.DictType:\n\t\t\t\tif \"map\" in message.keys():\n\t\t\t\t\tif message[\"map\"] == \"rpdb\":\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\timport rpdb2\n\t\t\t\t\t\t\tself.level[\"debug\"][1](\"%s: starting rpdb2\" % (self.getName()))\n\t\t\t\t\t\t\trpdb2.start_embedded_debugger(message[\"argv\"][0])\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tself.level[\"debug\"][1](\"%s: no rpdb2 found, continuing w/o debugging\" % (self.getName()))\n\n\n\t\t\telif type(message) == types.StringType:\n\t\t\t\tif self.magic(message):\n\t\t\t\t\treturn\n\n\t\t\telif type(message) == types.TupleType:\n\t\t\t\ts = addr[\"from\"].split(\"@\")\n\t\t\t\tif len(s) > 1:\n\t\t\t\t\tsystem = s[1]\n\t\t\t\telse:\n\t\t\t\t\tsystem = \"local\"\n\t\t\t\tself.level[message[0]][1](\"[%s] %s\" % (system, message[1]))\n\n\tdef magic(self,text):\n\t\tif text == \"shutdown\":\n\t\t\tself.level[\"info\"][1](\"logger thread %s shutdown\" % (self.getName()))\n\t\t\treturn True\n\n\t\telif text in self.level.keys():\n\t\t\tlogging.getLogger().setLevel(self.level[text][0])\n\n\t\treturn False\n","repo_name":"svinota/cx","sub_path":"obsoleted/cxcore/cxcore/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27080711990","text":"from tarfile import PAX_NAME_FIELDS\r\nimport turtle\r\nimport pandas as pd\r\n\r\ndata = pd.read_csv(\"app_brewery-365 days code\\\\us-states-game-start\\\\50_states.csv\")\r\n\r\nscreen = turtle.Screen()\r\nscreen.title('U.S. 
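The cxcore logger record above is Python 2 only: types.DictType, types.StringType and types.TupleType no longer exist in Python 3. A sketch of the equivalent dispatch with isinstance (the return labels are illustrative, not from the record):

def classify_message(message):
    if isinstance(message, dict):
        return 'config'       # replaces type(message) == types.DictType
    if isinstance(message, str):
        return 'magic'        # replaces types.StringType
    if isinstance(message, tuple):
        return 'log-record'   # replaces types.TupleType
    return 'unknown'

assert classify_message({'map': 'rpdb'}) == 'config'
assert classify_message('shutdown') == 'magic'
assert classify_message(('info', 'hello')) == 'log-record'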
States Game')\r\nimage = 'app_brewery-365 days code\\\\us-states-game-start\\\\blank_states_img.gif'\r\nscreen.addshape(image)\r\n\r\nturtle.shape(image)\r\nall_states = data.state.to_list()\r\n\r\n\r\ntimmy = turtle.Turtle()\r\ntimmy.hideturtle()\r\ntimmy.penup()\r\ntimmy.speed('fastest')\r\n\r\nguessed_states = []\r\n\r\nwhile len(guessed_states) < 50:\r\n answer_state = screen.textinput(title=f\"{len(guessed_states)}/50 States Correct\", prompt=\"What's another state's name that you can remember:\").title()\r\n if answer_state in ['Exit', 'Quit', 'No']:\r\n missing_states = []\r\n for state in all_states:\r\n if state not in guessed_states:\r\n missing_states.append(state)\r\n # print(missing_states)\r\n new_data = pd.DataFrame(missing_states)\r\n new_data.to_csv('app_brewery-365 days code\\\\us-states-game-start\\\\states_to_learn.csv')\r\n break\r\n\r\n if answer_state in all_states:\r\n timmy.goto(x=int(data[data.state == answer_state].x), y=int(data[data.state == answer_state].y))\r\n timmy.write(arg=answer_state)\r\n guessed_states.append(answer_state)\r\n\r\n\r\n","repo_name":"TejSingh24/Python-Games-and-Applications","sub_path":"us-states-game-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25535769676","text":"from .entidades import *\nimport pygame\n\nclass Hud:\n \"Uniao dos elementos do HUD do jogador\"\n def __init__(self,tamanho_tela):\n tt = tamanho_tela\n self.__vida = Vida(tt[0]*3/50, tt[1]/20)\n self.__tempo = Tempo(tt[0]*11/50, tt[1]/20)\n self.__borrachona = Borrachona(tt[0] * 8 / 20, tt[1] / 20)\n self.__barra_poder = BarraPoder(tt[0]*7/9, tt[1]/12)\n self.__paleta = Paleta(tt[0]*11/20, tt[1]/20)\n self.__poder_armazenado = ArmazenadoPoder(tt[0]*11/20+92, tt[1]/20+35)\n\n def atualizar(self, tela, mapa, dimensoes_tela, tempo, vida, moedas_pegas, paletas_pegas):\n \"Atualiza cada um dos elementos\"\n self.__vida.atualizar_hud(tela, mapa, dimensoes_tela, vida)\n self.__tempo.atualizar_hud(tela, mapa, dimensoes_tela, tempo)\n self.__barra_poder.atualizar_hud(tela, mapa, dimensoes_tela)\n self.__borrachona.atualizar_hud(tela, mapa, dimensoes_tela, moedas_pegas)\n self.__paleta.atualizar_hud(tela, mapa, dimensoes_tela, paletas_pegas)\n self.__poder_armazenado.atualizar_hud(tela, mapa, dimensoes_tela)\n\n\nclass Vida(Estatico):\n \"Indica a vida do jogador\"\n def __init__(self, x: int, y: int):\n altura = 30\n largura = 100\n self.__fonte = pygame.font.SysFont('Arial', 20)\n self.__vida = \"\"\n super().__init__(\"vida\", x, y, altura, largura, \"sprites\", (10, 237, 0))\n\n def renderizar(self, tela, mapa):\n nome = self.nome+\"_\"+str(self.__vida)\n self.sprite.imprimir(tela, nome, self.x, self.y, 0, 0, 0, 0, 0, 0)\n\n def atualizar_hud(self, tela, mapa, dimensoes_tela, vida):\n self.__vida = vida\n self.renderizar(tela, mapa)\n return False\n\n\nclass Tempo(Estatico):\n \"Mostra o tempo restante ate que o jogador perca\"\n\n def __init__(self, x: int, y: int):\n altura = 30\n largura = 70\n self.tempomax = 320\n self.__fonte = pygame.font.SysFont('Arial', 40)\n self.__tempo = 0\n self.__contador = self.__fonte.render('time :' + \" \" + str(self.__tempo), False, (0, 0, 0))\n super().__init__(\"tempo\", x, y, altura, largura, \"sprites\", (160, 160, 160))\n\n def renderizar(self, tela, mapa):\n \"Mostra o timer e altera o sprite da ampulheta quando necessario\"\n #print(self.__tempo, self.tempomax)\n if type(self.__tempo) == int :\n 
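In the 50-states record above, int(data[data.state == answer_state].x) relies on coercing a one-row Series to int, which newer pandas deprecates; selecting the row first is equivalent and explicit. A sketch with stand-in coordinates:

import pandas as pd

data = pd.DataFrame({'state': ['Ohio', 'Texas'], 'x': [120, -40], 'y': [15, -80]})
row = data[data.state == 'Texas'].iloc[0]  # take the matching row, then its scalars
print(int(row.x), int(row.y))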
nome = \"tempo_\"+str(int(self.__tempo/max((self.tempomax/5),1)))\n else:\n nome = \"tempo_\"\n self.__contador = self.__fonte.render(str(self.__tempo), False, (0, 0, 0))\n tela.blit(self.__contador, (self.x+70, self.y+35))\n self.sprite.imprimir(tela, nome, self.x, self.y, 0, 0, 0, 0, 0, 0)\n\n def atualizar_hud(self, tela, mapa, dimensoes_tela, tempo):\n \"Atualiza o contador interno dele com o do mapa\"\n self.__tempo = tempo\n self.renderizar(tela, mapa)\n return False\n\n\nclass Borrachona(Estatico):\n \"Contador de Borrachas coletadas\"\n def __init__(self, x: int, y: int):\n altura = 30\n largura = 60\n super().__init__(\"borrachona\", x, y, altura, largura, \"sprites\", (254, 254, 0))\n self.__numero_biscoitos = 0\n self.__fonte = pygame.font.SysFont('Arial', 40)\n self.__escreve_na_tela = \"\"\n\n def renderizar(self, tela, mapa):\n \"Mostra a quantidade de borrachas coletadas\"\n self.__escreve_na_tela = self.__fonte.render(\"x\" + str(self.__numero_biscoitos), False, (0, 0, 0))\n self.sprite.imprimir(tela, \"borrachona\", self.x, self.y, 0, 0, 0, 0, 0, 0)\n tela.blit(self.__escreve_na_tela, (self.x+90, self.y+35))\n \n def atualizar_hud(self, tela, mapa, dimensoes_tela, moedas_pegas):\n \"Atualiza o contador interno dele com o do Jogador\"\n self.__numero_biscoitos = moedas_pegas\n self.renderizar(tela, mapa)\n return False\n\n\nclass BarraPoder(Estatico):\n \"Indica a recarga e duracao do poder\"\n def __init__(self, x: int, y: int):\n altura = 40\n largura = 188\n self.__largura_atual = largura\n self.__cor_poder = (0, 0, 0)\n self.__corpo_poder = []\n super().__init__(\"poder_barra\", x, y, altura, largura, \"sprites\", (205, 133, 63))\n #self.sprite = SpriteSheetBarras()\n\n def atualizar_hud(self, tela, mapa, dimensoes_tela):\n \"Checa se o jogador trocou de poder e tenta renderizar\"\n self.__cor_poder = mapa.jogador.poder.cor\n self.__largura_atual = (abs(mapa.jogador.poder.descanso - mapa.jogador.poder.recarga))/mapa.jogador.poder.recarga * self.largura\n self.__corpo_poder = pygame.Rect(self.x, self.y, self.__largura_atual, self.altura)\n self.renderizar(tela, mapa)\n return False\n\n def renderizar(self, tela, mapa):\n \"Mostra quanto falta para recarregar ou acabar o poder, e qual ele eh\"\n pygame.draw.rect(tela, self.cor, self.corpo)\n pygame.draw.rect(tela, self.__cor_poder, self.__corpo_poder)\n nome = self.nome+\"_\"+mapa.jogador.poder.nome\n self.sprite.imprimir(tela, nome, self.x-70, self.y-18, 0, 0, 0, 0, 0, 0)\n\n\nclass Paleta(Estatico):\n \"Mostra o progresso de colecao de paletas no mapa\"\n def __init__(self, x: int, y: int):\n altura = 40\n largura = 40\n self.__largura_atual = largura\n super().__init__(\"paleta\", x, y, altura, largura, \"sprites\", (205, 133, 63))\n self.__paletas_coletadas = 0\n self.__fonte = pygame.font.SysFont('Arial', 40)\n self.__escreve_na_tela= \"\"\n\n def renderizar(self, tela, mapa):\n \"Renderiza a soma das partes coletadas\"\n nome = self.nome + \"_\" + str(self.__paletas_coletadas)\n self.sprite.imprimir(tela, nome, self.x, self.y, 0, 0, 0, 0, 0, 0)\n\n def atualizar_hud(self, tela, mapa, dimensoes_tela, paletas_pegas):\n self.__paletas_coletadas = paletas_pegas\n self.renderizar(tela, mapa)\n return False\n\nclass ArmazenadoPoder(Estatico):\n \"Mostra qual poder o jogador guardou para trocar quando necessario\"\n def __init__(self, x: int, y: int):\n altura = 40\n largura = 40\n self.__cor_poder = (0, 0, 0)\n self.__corpo_poder = []\n super().__init__(\"poder_armazenado\", x, y, altura, largura, \"sprites\", (205, 133, 
63))\n #self.sprite = SpriteSheetBarras()\n\n def atualizar_hud(self, tela, mapa, dimensoes_tela):\n self.renderizar(tela, mapa)\n return False\n\n def renderizar(self, tela, mapa):\n \"Poe na tela o icone do poder guardado\"\n nome = \"poder_\"+mapa.jogador.poder_armazenado.nome\n self.sprite.imprimir(tela, nome, self.x-70, self.y-18, 0, 0, 0, 0)","repo_name":"bnmfw/Rabisco","sub_path":"game/hud.py","file_name":"hud.py","file_ext":"py","file_size_in_byte":6632,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4978464184","text":"# version 1.5 vom 20.11.23\nimport time\nimport asyncio\nimport paho.mqtt.publish as publish\nimport paho.mqtt.client as mqtt\nimport configparser\nfrom huawei_solar import HuaweiSolarBridge\n\ndef on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Verbunden mit dem Broker\")\n else:\n print(f\"Verbindung fehlgeschlagen, Rückgabewert={rc}\")\n\ndef on_disconnect(client, userdata, rc):\n if rc != 0:\n print(\"Unerwartete Trennung, versuche erneut zu verbinden\")\n try_reconnect(client)\n\ndef try_reconnect(client):\n max_reconnect_attempts = 3\n current_attempt = 0\n while not client.is_connected() and current_attempt < max_reconnect_attempts:\n try:\n print(\"Versuche erneut zu verbinden...\")\n client.reconnect()\n current_attempt += 1\n time.sleep(2)\n except Exception as e:\n print(f\"Fehler beim erneuten Verbinden: {e}\")\n break\n\n\n\nasync def huaweiReadValues(bridge, topic_mapping, mqtthost, mqttclient, registers):\n\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_disconnect = on_disconnect\n\n # Setzen Sie hier weitere Konfigurationsoptionen für den Client\n\n client.connect(mqtthost, 1883, 60)\n\n\n\n if bridge is None:\n bridge = await HuaweiSolarBridge.create(host=\"192.168.200.1\", port=6607)\n print(\"Verbindungsbruecke herstellen\")\n\n while True:\n try:\n register_values = {}\n\n for register in registers:\n try:\n result = await bridge.client.get(register)\n\n message = 0 if result[0] in [None, 0] else result[0]\n mqtttopic = None\n\n if register == 'input_power':\n mqtttopic = f\"openWB/set/pv/{pvnumber}/get/power\"\n message = result[0] * -1\n # print(f\"Eingangsleistung: {result[0]}\")\n\n elif register == 'daily_yield_energy':\n mqtttopic = f\"openWB/set/pv/{pvnumber}/get/exported\"\n message = result[0] * 1000\n # print(f\"Taegliche Ertragsenergie: {result[0]}\")\n\n if mqtttopic:\n register_values[register] = message\n except:\n pass\n\n for register, value in register_values.items():\n mqtttopic = topic_mapping.get(register)\n if mqtttopic:\n try:\n publish.single(mqtttopic, payload=value, qos=0, retain=False, hostname=mqtthost,\n client_id=mqttclient)\n except:\n pass\n\n await asyncio.sleep(10)\n\n except KeyboardInterrupt:\n await bridge.stop()\n break\n\n await bridge.stop()\n\ndef read_config():\n config = configparser.ConfigParser()\n config.read('/home/pi/huawei_bridge_openwb/config.ini')\n return config\n\nconfig = read_config()\nmqtthost = config['MQTT']['host']\npvnumber = config.getint('pvnumber', 'value')\ncounter_number = config.getint('counternumber', 'value')\nbat_number = config.getint('batnumber', 'value')\n\n# Dictionary MQTT-Topic Mapping\ntopic_mapping = {\n 'input_power': f\"openWB/set/pv/{pvnumber}/get/power\",\n 'daily_yield_energy': f\"openWB/set/pv/{pvnumber}/get/exported\",\n}\n\n# Registernames\nregisters = ['input_power', 'daily_yield_energy',]\n\nbridge = None\nloop = 
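The BarraPoder widget in the HUD record above sizes the bar from abs(descanso - recarga) / recarga; a clamped version of that fraction (generic names, not the record's) keeps the bar inside its frame even if the counters overshoot:

def bar_width(remaining, total, full_width=188):
    frac = max(0.0, min(1.0, remaining / total))  # clamp to [0, 1]
    return int(frac * full_width)

assert bar_width(5, 10) == 94
assert bar_width(15, 10) == 188  # overshoot is clamped instead of overflowing the frame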
asyncio.get_event_loop()\nloop.run_until_complete(huaweiReadValues(bridge, topic_mapping, mqtthost, \"PVImporter\", registers))\n","repo_name":"AlexanderMetzger/huawei_openwb_bridge","sub_path":"huaweimqtt2ndinv.py","file_name":"huaweimqtt2ndinv.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"23704546622","text":"list = []\nnome = []\nprova = [[], [], []]\n\nfor c in range(1, 6):\n nt = str(input(f'Questão {c}: '))\n list.append(nt)\n\nfor i in range(0, 3):\n n = str(input('Nome: '))\n nome.append(n)\n for c in range(1, 6):\n na = str(input(f'Questão {c}: '))\n prova[i].append(na)  # answers go into prova; appending to the strings in list crashed\n\n\nprint(list)\nprint(nome)\nprint(prova)","repo_name":"annakesyalima/python","sub_path":"programas/logica-em-python/Aula14f.py","file_name":"Aula14f.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73029318133","text":"import sys\n\nwin_default_shells = [\"cmd.exe\", \"powershell\", \"git_bash\", \"cygwin\"]\nshells = [\"bash\", \"zsh\"]\n\nif sys.platform == \"win32\":\n shells = win_default_shells\n\ndef pytest_addoption(parser):\n parser.addoption(\"--shell\", action=\"append\", default=[],\n help=\"list of shells to run shell tests on\")\n\ndef pytest_generate_tests(metafunc):\n if 'shell' in metafunc.fixturenames:\n metafunc.parametrize(\"shell\", metafunc.config.option.shell)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/conda_conda/conda-master/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
{"seq_id":"73884479092","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import svm\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=FutureWarning, module=\"sklearn\", lineno=196)\n\n\nf = open('x_data.csv')\ndf = pd.read_csv(f)\nx_data = np.array(df)\n\nf = open('y_data.csv')\ndf = pd.read_csv(f)\n#y_data = np.array(df)\ny_data_temp = np.array(df)\ny_data = np.zeros(1)\nfor temp in y_data_temp:\n # y_data.append(int(temp[0]))\n y_data = np.append(y_data, temp[0])\n\ny_data = y_data[1:len(y_data)]\n\n#y_data = np.loadtxt(open(\"y_data.csv\", \"rb\"), delimiter=\",\", skiprows=0)\n\nprint(\"-------------1--------------\")\nprint(x_data)\nprint(y_data)\n# Build the support vector classification model\nclf = svm.SVC()\nprint(\"-------------0--------------\")\n# Fit the training data to obtain the trained model parameters\nclf.fit(x_data,y_data)\nprint(\"---------------------------\")\n# Predict on the test points [2., 2.], [3., 3.]\nresult = clf.predict([[7, 57600000, 2, 9020, 16548.48],\n [11, 892685.14, 1, 2050, 769556.15],\n [7, 263.95, 1, 3090, 225.6],\n [11, 4288.93, 1, 2050, 3697.35],\n [7, 32916, 1, 2080, 28375.86],\n [7, 7504889.99, 1, 2020, 7286301.01]])\n\nprint(result)\n\n\n## Output succeeded, but the computation is extremely slow\n##[ 2. 11. 17. 2. 15. 
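The Huawei-to-MQTT record above builds a paho client with reconnect callbacks but never starts its network loop, and then publishes through publish.single, which opens a fresh connection per message. A sketch of reusing one connected client (paho-mqtt 1.x style to match the record; broker host and topic are placeholders):

import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect('localhost', 1883, 60)   # placeholder broker
client.loop_start()                     # runs the network loop so on_connect/on_disconnect fire
client.publish('openWB/test', payload='42', qos=0)
client.loop_stop()
client.disconnect()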
10.]\n\n\n\n#X = [[0, 0], [1, 1]]\n#y = [0, 1]\n#clf = svm.SVC()\n#clf.fit(X, y)\n#res = clf.predict([[2., 2.],[3., 3.]])\n#输出2,3","repo_name":"jaysonzhao/pythonmisc","sub_path":"sklearn分类使用对比/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42969430933","text":"import pytest\nfrom transcriptor.helpers import text_in_range\n\n\n@pytest.fixture()\ndef segments():\n segment = [\n {\n \"start_time\":0.000,\n \"end_time\": 1.0000,\n \"type\":\"pronunciation\",\n \"alternatives\": [{\n \"confidence\": 1.0,\n \"content\": \"Once\",\n }],\n \n },\n {\n \"start_time\":1.000,\n \"end_time\": 2.0000,\n \"type\":\"pronunciation\",\n \"alternatives\": [{\n \"confidence\": 1.0,\n \"content\": \"Upon\"\n }],\n },\n {\n \"start_time\":3.000,\n \"end_time\": 4.0000,\n \"type\":\"pronunciation\",\n \"alternatives\": [{\n \"confidence\": 0.0,\n \"content\": \"b\"\n },\n {\n \"confidence\": 0.4,\n \"content\": \"A\"\n },\n ],\n },\n {\n \"start_time\":4.000,\n \"end_time\": 6.0000,\n \"type\": \"punctuation\",\n \"alternatives\": [{\n \"confidence\": 1.0,\n \"content\": \".\"\n }],\n },\n ]\n return segment\n\ndef test_text_in_range_gets_only_text_in_range(segments):\n \"\"\"Given a segment text_in_range returns the text between the \n start/stop times\"\"\"\n\n # Test that only the 2 of the segments are returned\n target_start_time = 1.3\n target_end_time = 3.5\n\n test_text = text_in_range(segments, target_start_time, target_end_time)\n assert test_text == \"Upon A\"\n\n\ndef test_text_in_range_doesnt_add_space_for_punctuation(segments):\n \"\"\"Punctuation should not have a space before it.\"\"\" \n\n target_start_time = 2.3\n target_end_time = 7.5\n\n test_text = text_in_range(segments, target_start_time, target_end_time)\n assert test_text == \"A.\"\n\n","repo_name":"kjaymiller/transcriptor","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"4869473367","text":"import tensorflow as tf\nimport numpy as np\nimport os\nfrom pymongo import MongoClient\nimport cStringIO\n# import io\nimport base64\nfrom PIL import Image\nfrom tqdm import tqdm, trange\n\n\ndef lrelu(x, alpha=0.1):\n return tf.maximum(alpha * x, x)\n\n\nclass ImageAutoEncoder:\n epochs = 10000\n input_size = [None, 128, 128, 3]\n compressed_size = [None, 16, 16, 32]\n model_directory = '.\\image_encoder_trained_model'\n\n def __init__(self):\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n self.x = tf.placeholder(tf.float32, self.input_size, name='x')\n self.encoded_x = tf.placeholder(tf.float32, self.compressed_size, name='encoded_x')\n\n #Convolution layers\n conv0 = tf.layers.conv2d(self.x, 64, 3, padding=\"same\", activation=lrelu, name='conv0')\n pool0 = tf.layers.max_pooling2d(conv0, 2, 2, padding=\"same\", name='pool0')\n conv1 = tf.layers.conv2d(pool0, 48, 3, padding=\"same\", activation=lrelu, name='conv1')\n pool1 = tf.layers.max_pooling2d(conv1, 2, 2, padding=\"same\", name='pool1')\n conv2 = tf.layers.conv2d(pool1, 32, 3, padding=\"same\", activation=lrelu, name='conv2')\n pool2 = tf.layers.max_pooling2d(conv2, 2, 2, padding=\"same\", name='pool2')\n\n # Dense layers\n # flat = tf.reshape(pool2, [-1, 16*16*32])\n # dense0 = tf.layers.dense(flat, 
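The svm.py record above (its closing comment notes the run was extremely slow) feeds features spanning roughly eight orders of magnitude straight into SVC; standardizing first is the usual fix. A sketch with toy stand-ins for x_data.csv / y_data.csv:

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X = np.array([[7, 5.76e7, 2, 9020, 16548.4], [11, 892685.1, 1, 2050, 769556.1],
              [7, 263.9, 1, 3090, 225.6], [11, 4288.9, 1, 2050, 3697.3]])
y = np.array([0, 1, 0, 1])

model = make_pipeline(StandardScaler(), SVC())  # scaling keeps the RBF kernel well-conditioned
model.fit(X, y)
print(model.predict(X))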
units=1024, activation=lrelu, name='dense0')\n # dense1 = tf.layers.dense(dense0, units=512, activation=lrelu, name='dense1')\n self.encoder = pool2\n self.training_decoder = self.build_decoder(self.encoder, reuse=False)\n self.decoder = self.build_decoder(self.encoded_x)\n # self.loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.training_decoder, labels=self.x)\n self.loss = tf.reduce_sum(tf.abs(self.x - self.training_decoder), name='loss')\n self.cost = tf.reduce_mean(self.loss)\n self.train = tf.train.AdamOptimizer().minimize(self.cost, name='train')\n tf.add_to_collection('x', self.x)\n tf.add_to_collection('encoded_x', self.encoded_x)\n tf.add_to_collection('encoder', self.encoder)\n tf.add_to_collection('decoder', self.decoder)\n tf.add_to_collection('training_decoder', self.training_decoder)\n tf.add_to_collection('loss', self.loss)\n tf.add_to_collection('cost', self.cost)\n tf.add_to_collection('train', self.train)\n self.saver = tf.train.Saver()\n\n @staticmethod\n def build_decoder(x, reuse=True):\n # decode1 = tf.layers.dense(x, units=1024, activation=lrelu, name='decode1', reuse=reuse)\n # decode0 = tf.layers.dense(decode1, units=16*16*32, activation=lrelu, name='decode0', reuse=reuse)\n\n # Deconvolution layers\n # blowout = tf.reshape(decode0, [-1, 16, 16, 32])\n deconv2 = tf.layers.conv2d(x, 32, 3, padding=\"same\", activation=lrelu, name='deconv2', reuse=reuse)\n upsamp2 = tf.layers.conv2d_transpose(deconv2, 32, 3, 2, padding=\"same\", name=\"upsamp2\", reuse=reuse)\n upsamp1 = tf.layers.conv2d_transpose(upsamp2, 48, 3, 2, padding=\"same\", name='upsamp1', reuse=reuse)\n upsamp0 = tf.layers.conv2d_transpose(upsamp1, 64, 3, 2, padding=\"same\", name='upsamp0', reuse=reuse)\n decoded = tf.layers.conv2d(upsamp0, 3, 3, padding=\"same\", name='decoded', reuse=reuse)\n # return tf.sigmoid(decoded, name='sigged')\n return decoded\n\n def start_training(self, load_existing_model=True):\n self.sess.run(tf.global_variables_initializer())\n if load_existing_model and os.path.exists(self.model_directory):\n self.load_trained_model()\n else:\n self.sess.run(tf.global_variables_initializer())\n epoch_iterator = trange(self.epochs)\n for epoch in epoch_iterator:\n current_sum = 0\n count = 0\n batch_size = 10\n batch_count = 0\n batch = []\n try:\n for album in tqdm(MongoClient().albart.albums.find(), total=MongoClient().albart.albums.count()):\n if batch_count >= batch_size:\n try:\n feed_dict = {self.x: batch}\n loss, _ = self.sess.run([self.cost, self.train], feed_dict=feed_dict)\n count += 1\n current_sum += loss\n batch_count = 0\n batch = []\n except Exception as fail:\n continue\n else:\n img = cStringIO.StringIO(base64.b64decode(album[\"image\"]))\n img = Image.open(img)\n # buff = io.BytesIO(base64.b64decode(album[\"image\"]))\n # img = Image.open(buff)\n # Need to resize image once we have the image selector\n img.thumbnail((128, 128), Image.ANTIALIAS)\n resized_img = np.asarray(img)\n resized_img = resized_img / 255.0\n batch.append(resized_img)\n batch_count += 1\n if len(batch) > 0:\n try:\n feed_dict = {self.x: batch}\n loss, _ = self.sess.run([self.loss, self.train], feed_dict=feed_dict)\n count += 1\n current_sum += loss\n except Exception as fail:\n continue\n except Exception as fail:\n continue\n finally:\n self.save_trained_model()\n epoch_iterator.set_description('Epoch {} average training loss: {}'.format(epoch, (current_sum / count) / batch_size))\n\n def save_trained_model(self):\n self.saver.save(self.sess, os.path.join(self.model_directory, 
'model'))\n\n def load_trained_model(self):\n self.saver.restore(self.sess, tf.train.latest_checkpoint(self.model_directory))\n\n def encode(self, image):\n return self.sess.run(self.encoder, feed_dict={self.x: image})\n\n def decode(self, encoded_image):\n return self.sess.run(self.decoder, feed_dict={self.encoded_x: encoded_image})\n\ndef main():\n auto_encoder = ImageAutoEncoder()\n auto_encoder.start_training()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kamenomagic/albumart","sub_path":"image_auto_encoder.py","file_name":"image_auto_encoder.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70523497972","text":"#!/usr/bin/env python3\n\nimport os\nos.environ['PYTHONPATH'] = os.path.dirname(os.path.realpath(__file__))\n\nimport re\nimport subprocess\n\nimport conf\nimport utils\n\n\ndef xkb_indicator(show_emblem=True, timeout=1):\n emblem = conf.EMBLEM['system']['indicator']\n color = conf.COLOR['xkb_indicator']['active']\n\n label = ''\n xkb_status = subprocess.check_output(['xset', 'q'], timeout=timeout)\n\n numlock_status = re.search('(Num Lock: +)(on|off)', xkb_status.decode())\n capslock_status = re.search('(Caps Lock: +)(on|off)', xkb_status.decode())\n\n if numlock_status.group(2) == \"on\":\n num_color = conf.COLOR['xkb_indicator']['active']\n else:\n num_color = conf.COLOR['xkb_indicator']['inactive']\n\n label += utils.colorize(conf.EMBLEM['indicator']['numlock'], num_color)\n\n if capslock_status.group(2) == \"on\":\n caps_color = conf.COLOR['xkb_indicator']['active']\n else:\n caps_color = conf.COLOR['xkb_indicator']['inactive']\n\n label += utils.colorize(conf.EMBLEM['indicator']['capslock'], caps_color)\n\n if show_emblem:\n return \"{0} {1}\".format(utils.colorize(emblem, color), label)\n\n return label\n\n\nif __name__ == '__main__':\n print(xkb_indicator())\n","repo_name":"duyhenryer/i3wm-config","sub_path":"src/tabbar/xkb_indicator.py","file_name":"xkb_indicator.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"41781887393","text":"#!python\n\n\ndef merge(items_1, items_2):\n \"\"\"Merge given lists of items, each assumed to already be in sorted order,\n and return a new list containing all items in sorted order.\n Running time: O(n) - have to go through the whole array\n Memory usage: O(m) - we are creating new array which is exact size of the input\"\"\"\n\n # are the lists empty?\n if not items_1 and not items_2:\n return []\n\n # used to merge two sorted lists together\n merged_list = []\n index_1 = 0\n index_2 = 0\n\n # iterate until at least one of the lists are done\n while (index_1 < len(items_1)) and (index_2 < len(items_2)):\n \n if items_1[index_1] <= items_2[index_2]:\n merged_list.append(items_1[index_1])\n index_1 += 1\n else:\n merged_list.append(items_2[index_2])\n index_2 += 1\n\n # are there any elements left in items_1 list?\n if index_1 <= len(items_1)-1:\n # copy the rest of the elements from array items_1 to merged_list\n while index_1 < len(items_1):\n merged_list.append(items_1[index_1])\n index_1 += 1\n\n # are there any elements left in items_2 list?\n elif index_2 <= len(items_2)-1:\n # copy the rest of the elements from array items_2 to merged_list\n while index_2 < len(items_2):\n merged_list.append(items_2[index_2])\n index_2 += 1\n\n return merged_list\n\n\n\ndef split_sort_merge(items):\n \"\"\"Sort given items by 
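The image autoencoder record above targets TF1 (tf.layers, tf.Session) and Python 2 (cStringIO); a compact Keras equivalent of the same topology, with relu standing in for the record's leaky relu (a sketch, not the project's code):

import tensorflow as tf

def build_autoencoder():
    inp = tf.keras.Input(shape=(128, 128, 3))
    x = inp
    for filters in (64, 48, 32):                     # encoder: 128 -> 64 -> 32 -> 16
        x = tf.keras.layers.Conv2D(filters, 3, padding='same', activation='relu')(x)
        x = tf.keras.layers.MaxPooling2D(2, padding='same')(x)
    for filters in (32, 48, 64):                     # decoder mirrors the record's upsampling
        x = tf.keras.layers.Conv2DTranspose(filters, 3, strides=2, padding='same')(x)
    out = tf.keras.layers.Conv2D(3, 3, padding='same')(x)
    return tf.keras.Model(inp, out)

model = build_autoencoder()
model.compile(optimizer='adam', loss='mae')  # absolute-error objective, as in the record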
splitting list into two approximately equal halves,\n sorting each with an iterative sorting algorithm, and merging results into\n a list in sorted order.\n Running time: O(nlogn) here - both halves go through merge_sort below; it depends on the sorting algo used for the halves.\n Memory usage: O(n) - we are creating new list at the end to merge\"\"\"\n # Split items list into approximately equal halves\n mid = len(items) // 2\n first_half = items[:mid]\n second_half = items[mid:]\n # Sort each half using any other sorting algorithm\n first_half = merge_sort(first_half)\n second_half = merge_sort(second_half)\n # Merge sorted halves into one list in sorted order\n merged = merge(first_half, second_half)\n return merged\n\n\ndef merge_sort(items):\n \"\"\"Sort given items by splitting list into two approximately equal halves,\n sorting each recursively, and merging results into a list in sorted order.\n Running time: O(nlogn) - logn levels of splitting, with O(n) merge work per level\n Memory usage: O(n) - the slices dominate; the recursion stack adds O(logn)\"\"\"\n # Check if list is so small it's already sorted (base case)\n if len(items) <= 1:\n return items\n else:\n # Split items list into approximately equal halves\n mid = len(items) // 2\n first_half = items[:mid]\n second_half = items[mid:]\n # Sort each half by recursively calling merge sort\n first_half = merge_sort(first_half)\n second_half = merge_sort(second_half)\n # Merge sorted halves into one list in sorted order\n return merge(first_half, second_half)\n\n\ndef find_median(items: list, low=None, high=None):\n \"\"\"\n Find the median out of 3 numbers and swap the items in the list so the median\n ends up at index high (the end of the range)\n \"\"\"\n # set the initial values of low and high\n if low is None and high is None:\n low = 0\n high = len(items) - 1\n\n mid = (high + low) // 2\n\n if items[mid] < items[low]:\n items[mid], items[low] = items[low], items[mid]\n if items[high] < items[low]:\n items[high], items[low] = items[low], items[high]\n if items[mid] < items[high]:\n items[mid], items[high] = items[high], items[mid]\n\ndef partition(items, low, high):\n \"\"\"Return index `p` after in-place partitioning given items in range\n `[low...high]` by choosing the median of three (low, middle and high items) as pivot from\n that range, moving pivot into index `p`, items less than pivot into range\n `[low...p-1]`, and items greater than pivot into range `[p+1...high]`.\n Running time: O(n) - every item in the range is compared to the pivot once\n Memory usage: O(1) - not creating any new arr\"\"\"\n # Median-of-three pivot selection: find_median leaves the pivot at items[high]\n # Loop through all items in range [low...high]\n # Move items less than pivot into front of range [low...p-1]\n # Move items greater than pivot into back of range [p+1...high]\n # Move pivot item into final position [p] and return index\n if high - low >= 2:\n find_median(items, low, high)\n\n # last element is the pivot\n pivot = items[high]\n pivot_index = low\n\n for i in range(low, high):\n # if there is smaller number than the pivot, swap it with pivot index\n if items[i] <= pivot:\n items[i], items[pivot_index] = items[pivot_index], items[i]\n # shift the pivot index to right\n pivot_index += 1\n\n # swap the pivot num from the end to the pivot index\n items[high], items[pivot_index] = items[pivot_index], items[high]\n\n return pivot_index\n \n\n\ndef quick_sort(items, low=None, high=None):\n \"\"\"Sort given items in place by partitioning items in range 
`[low...high]`\n around a pivot item and recursively sorting each remaining sublist range.\n Best case running time: O(nlogn) - two halves of the arr are equal size apprx.\n Worst case running time: O(n^2) - imbalanced halves of the array\n Memory usage: O(logn) - recursion\"\"\"\n # Check if high and low range bounds have default values (not given)\n # Check if list or range is so small it's already sorted (base case)\n # Partition items in-place around a pivot and get index of pivot\n # Sort each sublist range by recursively calling quick sort\n if low == None and high == None:\n low = 0\n high = len(items)-1\n\n if low < high:\n # get the pivot index\n pivot_index = partition(items, low, high)\n # left half of the list to be sorted\n quick_sort(items, low, pivot_index - 1)\n # right half of the list to be sorted\n quick_sort(items, pivot_index + 1, high)\n","repo_name":"makhmudislamov/Trees-Sorting","sub_path":"Code/sorting_recursive.py","file_name":"sorting_recursive.py","file_ext":"py","file_size_in_byte":6168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43037816207","text":"import math\n\n\ndef main():\n n = int(input())\n k = math.floor(math.log2(n))\n if n >= 2**(k+1):\n print(k+1)\n elif n < 2**k:\n print(k-1)\n else:\n print(k)\n\n\nmain()","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/ABC215-B.py","file_name":"ABC215-B.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31202171196","text":"import HomeAutomationSource\nimport home_utilities\n\n\ndef revert(revert_str):\n home_utilities.speak(revert_str)\n\n\ndef perform_operation(ob,op):\n z = 'turning '+op+' the '+ob\n revert(z)\n \n\ndef read_object_state(obj):\n return 'on'\n\ndef deal_ambiguity(keyMap_conf, ambiguity, keyMap_object, keyMap_operation):\n print(ambiguity)\n if ambiguity['object'] == 0:\n revert('i did not get that! 
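The ABC215 record above guards math.log2 against floating-point error with two extra comparisons; for positive integers, int.bit_length gives floor(log2(n)) exactly, with no correction step:

def floor_log2(n):
    return n.bit_length() - 1  # index of the highest set bit

for n in (1, 2, 3, 1023, 1024, 10**18):
    print(n, floor_log2(n))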
Please say that again')\n \n if ambiguity['object'] == 1 and ambiguity['operation'] == 1:\n perform_operation(keyMap_object[0],keyMap_operation[0])\n return\n if ambiguity['object'] <= 2 and ambiguity['operation'] == 0:\n for obj in keyMap_object:\n c = read_object_state(obj)\n cim = 'do you want me to switch '+c+' the '+obj\n revert(cim)\n string = home_utilities.accept_string_command()\n if 'yes' in string :\n perform_operation(obj,c)\n else:\n if c == 'on':\n perform_operation(obj,'off')\n elif c == 'off':\n perform_operation(obj,'on')\n return\n \n elif ambiguity['operation'] == 1 and ambiguity['object'] <= 2:\n for obj in keyMap_object:\n perform_operation(obj,keyMap_operation[0])\n return\n else:\n revert('i can process only two commands at once')\n\n \n \ndef perform_keyBindings_match(keyMap_conf, inputString):\n (keyBindings_object2, value_keyBindings_operation2,\n keyBindings_conf2) = HomeAutomationSource.function_keyBindings_words(\n inputString\n )\n if (keyMap_conf + keyBindings_conf2) == 100:\n perform_operation(keyBindings_object2, value_keyBindings_operation2)\n else:\n revert('please say that again')\n \ndef perform_query_match(keyMap_conf, inputString): \n (QueryMatching_object, QueryMatching_operation,\n Query_conf) = HomeAutomationSource.Query_matching(\n inputString\n )\n if Query_conf == 100:\n perform_operation(QueryMatching_object, QueryMatching_operation)\n else:\n perform_keyBindings_match(keyMap_conf, inputString)\n\ndef check_ambiguity(inputEntries, inputString):\n ( keyMap_object, keyMap_operation, keyMap_conf,\n ambiguity ) = HomeAutomationSource.function_keyMap(\n inputEntries\n )\n \n if keyMap_conf == 40:\n perform_query_match(keyMap_conf, inputString)\n else:\n deal_ambiguity(keyMap_conf, ambiguity, keyMap_object, keyMap_operation)\n \n \n\n \ndef accept_command1(inputString):\n print(\"+++++++++++\" ,inputString)\n\n #Applying tockenaization to input string and stored in dictonary\n input_entries = home_utilities.raw_String_To_List(str(inputString),1) \n\n check_ambiguity(input_entries, inputString)\n","repo_name":"shashankrp/IOT","sub_path":"recemendingSystem.py","file_name":"recemendingSystem.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3639032483","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\nimport csv\nimport requests\nimport logging\nfrom funda.utils import clean\nfrom datetime import date\nfrom scrapy.exporters import CsvItemExporter\nfrom scrapy.exceptions import CloseSpider\n\nclass SaveToFilePipeline:\n def open_spider(self, spider):\n self.exporter_files = {}\n self.items_processed = 0\n self.items_already_exist = 0\n self.previous_records = {}\n\n\n def close_spider(self, spider):\n logging.log(logging.INFO, f'Items processed {self.items_processed}')\n logging.log(logging.INFO, f'Items existed {self.items_already_exist}')\n\n # Check for previous records not found this time\n items_removed = 0\n with open(f'data/{spider.search_area}-{date.today()}_removed.csv', 'w+') as csvfile:\n writer = csv.writer(csvfile)\n for key, value in self.previous_records.items():\n if value:\n items_removed += 1\n writer.writerow([key])\n logging.log(logging.INFO, f'Items removed {items_removed}')\n\n # Close all opened exporters\n for exporter, csv_file in self.exporter_files.values():\n 
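The recommender record above flips a device state with an if/elif pair inside deal_ambiguity; a one-line helper makes the toggle reusable (a sketch under the record's on/off convention):

def toggle(state):
    return 'off' if state == 'on' else 'on'

assert toggle('on') == 'off' and toggle('off') == 'on'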
exporter.finish_exporting()\n csv_file.close()\n\n\n def _area_exporter(self, search_area):\n '''In case multiple search areas are active\n multiple exporters to be assigned\n Checks if 'search_area' exporter already exists\n if not, it's being created\n '''\n if search_area not in self.exporter_files:\n # Create exporter\n csv_file = open(f'data/{search_area}-{date.today()}.csv', 'wb+')\n exporter = CsvItemExporter(csv_file)\n exporter.start_exporting()\n self.exporter_files[search_area] = (exporter, csv_file)\n\n # Search for historical data\n self._get_previous_records(search_area)\n\n return self.exporter_files[search_area][0]\n\n\n def process_item(self, item, spider):\n self.items_processed += 1\n id = int(item['id'])\n if self.previous_records.get(id, False):\n self.items_already_exist += 1\n self.previous_records[id] = False\n else:\n # Clean data and export\n item['floor_area'] = clean.area(item['floor_area'])\n item['property_area'] = clean.area(item['property_area'])\n item['price'] = clean.price(item['price'])\n item['rooms'] = clean.rooms(item['rooms'])\n item['address'] = clean.postcode(item['address'])\n item['lat'], item['lon'] = self._get_coordinates(item['address'])\n\n exporter = self._area_exporter(spider.search_area)\n exporter.export_item(item)\n\n if self.previous_records.get(id, False):\n raise CloseSpider('Item not removed')\n\n logging.log(logging.DEBUG, f'Items processed {self.items_processed}')\n return item\n\n\n def _get_previous_records(self, search_area):\n '''Loads data from previous crawls. Compares items added/removed'''\n\n with os.scandir('data/') as files:\n for file in files:\n if not file.name.startswith('.')\\\n and file.is_file()\\\n and file.name.split('.')[-1] == 'csv'\\\n and '_removed' not in file.name\\\n and search_area in file.name:\n self._load_previous_data(file.path)\n\n\n def _load_previous_data(self, file_path):\n with open(file_path) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n self.previous_records[int(row['id'])] = True\n\n\n def _get_coordinates(self, address):\n '''Gets address coordinates from Nomintim API'''\n\n url = 'https://nominatim.openstreetmap.org/search'\n params = {'q': address,\n 'format': 'json'}\n response = requests.get(url, params).json()\n if response:\n return response[0]['lat'], response[0]['lon']\n return None, None\n","repo_name":"JanKrl/funda_scrap","sub_path":"funda/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12489589786","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 24 10:15:13 2021\nLogistic Regression\n@author: Muhammad Asghar Ali Qureshi\n\"\"\"\n\n\n##############################################################################\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n##############################################################################\n############################## Loading DataSet ###############################\n\ndata_set = pd.read_csv('./Social_Network_Ads.csv')\n\n##############################################################################\n####################### Defining I.V and D.V #################################\n\nX = data_set.iloc[:, 2:-1].values\nY = data_set.iloc[:, -1].values\n\n##############################################################################\n\n####################### Splitting Test And Train #############################\n\nfrom 
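The funda pipeline record above calls Nominatim with requests' default headers and no timeout; Nominatim's usage policy expects an identifying User-Agent, and a timeout keeps a dead endpoint from hanging the spider. A sketch (the agent string is a placeholder):

import requests

def geocode(address):
    resp = requests.get(
        'https://nominatim.openstreetmap.org/search',
        params={'q': address, 'format': 'json'},
        headers={'User-Agent': 'funda-scraper-example/0.1'},  # identify per Nominatim policy
        timeout=10,
    )
    results = resp.json()
    return (results[0]['lat'], results[0]['lon']) if results else (None, None)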
sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25,\n random_state = 42)\n\n\n#############################################################################\n################### Applying Feature Scalling ###############################\n\nfrom sklearn.preprocessing import StandardScaler\nstd_scalar = StandardScaler()\nstd_scalar_train = std_scalar.fit(X_train)\nstd_scalar_train = std_scalar_train.transform(X_train)\n\nstd_scalar_test = std_scalar.fit(X_test)\nstd_scalar_test = std_scalar_test.transform(X_test)\n\nX_train, X_test = std_scalar_train, std_scalar_test\n\n##############################################################################\n################# Creating classifier for logistic Regression ################\n\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state=42)\nclassifier = classifier.fit(X_train, Y_train)\n\nY_pred = classifier.predict(X_test)\n\n##############################################################################\n############ Analyzing model by Confusion Matrix #############################\n\nfrom sklearn.metrics import confusion_matrix\ncfm = confusion_matrix(Y_test, Y_pred)\n\n##############################################################################\n############## Visualizing model #############################################\n\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_train, Y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('#3f000f', '#02075d')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('#3f000f', '#02075d'))(i), label = j)\nplt.title('Logistic Regression (Train set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n\n\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X_test, Y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('#3f000f', '#02075d')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('#3f000f', '#02075d'))(i), label = j)\nplt.title('Logistic Regression (Test set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show() \n\n","repo_name":"muhammadasgharaliqureshi/Machine_learning_A_to_Z","sub_path":"Logistic Regression/Logistic_Regression.py","file_name":"Logistic_Regression.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42459708211","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.files.storage import FileSystemStorage\nfrom django.shortcuts import 
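The logistic-regression record above fits a fresh StandardScaler on the test split, so train and test end up on different scales (and test statistics leak into preprocessing). The standard pattern fits once on the training data only:

import numpy as np
from sklearn.preprocessing import StandardScaler

X_train = np.array([[1.0, 200.0], [2.0, 400.0], [3.0, 600.0]])
X_test = np.array([[1.5, 300.0]])

scaler = StandardScaler().fit(X_train)  # statistics come from the training split only
X_train_s = scaler.transform(X_train)
X_test_s = scaler.transform(X_test)     # reuse them; never refit on the test split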
render, get_object_or_404, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib import messages\nfrom .forms import RecipeForm, LogIn, UserRegistrationForm\nfrom .models import Recipe, Categories\n\n\n# import logging\n# from datetime import datetime\n#\n# logger = logging.getLogger(__name__)\n# logging.basicConfig(level=logging.INFO, filename=\"./logs/lesson1.log\", filemode='a', format='%(levelname)s %(message)s')\n# Create your views here.\n\ndef main(request):\n recipes = Recipe.objects.all().order_by(\"?\")\n # logger.info('Пользователь успешно зашел: ' + str(datetime.now()))\n return render(request, \"recipes.html\", {\"Recipes\": recipes[:9]})\n\n\ndef recipe_categories(request, category):\n # logger.info('Пользователь успешно зашел: ' + str(datetime.now()))\n category = get_object_or_404(Categories, name=category)\n recipes = Recipe.objects.filter(categories=category.id)\n return render(request, \"recipes.html\", {\"Recipes\": recipes[:9]})\n\ndef recipe(request, recipe_id):\n # logger.info('Пользователь успешно зашел: ' + str(datetime.now()))\n recipes = Recipe.objects.filter(pk=recipe_id).first()\n return render(request, \"recipe.html\", {\"Recipes\": recipes})\n\ndef edit_recipe(request, recipe_id):\n # logger.info('Пользователь успешно зашел: ' + str(datetime.datetime.now()))\n if request.method == 'POST':\n form = RecipeForm(request.POST, request.FILES)\n message = 'Ошибка в данных'\n recipe = get_object_or_404(Recipe, pk=recipe_id)\n if recipe.customer == request.user:\n if form.is_valid():\n datas = {\"categories\": form.cleaned_data['categories'], \"title\": form.cleaned_data['title'], \"products\": form.cleaned_data['products'], \"description\": form.cleaned_data['description'], \"cooking_steps\": form.cleaned_data['cooking_steps'], \"cooking_time\": form.cleaned_data['cooking_time'], \"image\": form.cleaned_data['image']}\n recipe = get_object_or_404(Recipe, pk=recipe_id)\n for data in datas:\n if datas[data] != \"\" and data == \"title\":\n recipe.title = datas[data]\n elif datas[data] != \"\" and data == \"description\":\n recipe.description = datas[data]\n elif datas[data] != \"\" and data == \"categories\":\n category = datas[data]\n elif datas[data] != \"\" and data == \"products\":\n recipe.products = datas[data]\n elif datas[data] != \"\" and data == \"cooking_steps\":\n recipe.cooking_steps = datas[data]\n elif datas[data] != \"\" and data == \"cooking_time\":\n recipe.cooking_time = datas[data]\n elif datas[data] != \"\" and data == \"image\":\n image = datas[data]\n fs = FileSystemStorage()\n fs.save(image.name, image)\n recipe.image = image.name\n categories = get_object_or_404(Categories, name=category)\n recipe.categories = categories\n recipe.save()\n message = 'Рецепт обновлен'\n return render(request, 'edit_recipe.html', {'form': form, 'message': message})\n else:\n message = 'Это не ваш рецепт. 
Ничего не выйдет'\n return render(request, 'edit_recipe.html', {'form': form, 'message': message})\n else:\n form = RecipeForm()\n message = 'Заполните форму'\n return render(request, 'edit_recipe.html', {'form': form, 'message': message})\n\ndef add_recipe(request):\n title = 'Добавить рецепт'\n head_title = 'Добавить рецепт: '\n cooking_form = RecipeForm(request.POST, request.FILES)\n if request.method == 'GET':\n return render(request, 'add_recipe.html',\n {'form': cooking_form,\n 'title': title,\n 'head_title': head_title,\n 'button': f'Add_recipe',})\n elif request.method == 'POST':\n if cooking_form.is_valid():\n name = cooking_form.cleaned_data['title']\n category = cooking_form.cleaned_data['categories']\n products = cooking_form.cleaned_data['products']\n description = cooking_form.cleaned_data['description']\n steps = cooking_form.cleaned_data['cooking_steps']\n cooking_time = cooking_form.cleaned_data['cooking_time']\n image = cooking_form.cleaned_data['image']\n recipe = Recipe(title=name, description=description,products=products, cooking_steps=steps, cooking_time=cooking_time, image=image)\n category = get_object_or_404(Categories, name=category)\n # fs = FileSystemStorage()\n # fs.save(image.name, image)\n # print(image.name)\n # recipe.image = image.name\n recipe.customer = request.user\n recipe.categories= category\n recipe.save()\n\n messages.success(request, f'Рецепт сохранен. {request.user}')\n return redirect('main')\n else:\n messages.error(request, f'Не получилось сохранить рецепт. {request.user}')\n return render(request, 'add_recipe.html',\n {'form': cooking_form,\n 'title': title,\n 'head_title': head_title})\n return render(request, 'add_recipe.html',\n {'form': cooking_form,\n 'title': title,\n 'head_title': head_title,\n 'button': f'Add_recipe',})\n@csrf_exempt\ndef user_login(request):\n if request.method == 'POST':\n form = LogIn(request.POST)\n message = 'Ошибка в данных'\n if form.is_valid():\n cd = form.cleaned_data\n user = authenticate(username=cd['username'], password=cd['password'])\n if user is not None:\n if user.is_active:\n login(request, user)\n message = 'Authenticated successfully'\n return redirect(account)\n else:\n message = 'Disabled account'\n return render(request, 'login.html', {'form': form, 'message': message})\n else:\n message = 'Invalid login'\n return render(request, 'login.html', {'form': form, 'message': message})\n else:\n form = LogIn()\n message = 'Заполните форму'\n return render(request, 'login.html', {'form': form, 'message': message, 'button': 'Login'})\n\ndef user_logout(request):\n logout(request)\n return redirect(user_login)\n\ndef register(request):\n title = 'Регистрация'\n if request.method == 'POST':\n message = 'Ошибка в данных'\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n new_user = user_form.save(commit=False)\n new_user.set_password(user_form.cleaned_data['password'])\n new_user.save()\n messages.success(request, f'Пользователь сохранен. 
Можете авторизоваться')\n return redirect('login')\n else:\n user_form = UserRegistrationForm()\n message = 'Заполните форму'\n return render(request, 'account_reg.html', {'user_form': user_form, 'title': title,'message': message, 'button': 'Register'})\n\n@login_required\ndef account(request):\n user = get_object_or_404(User, username=request.user)\n recipe = Recipe.objects.filter(customer=user.id).order_by(\"-id\")\n return render(request, 'account.html', {\"Recipes\": recipe[:9]})\n\n","repo_name":"Marassanovad/Recipe_site_Django","sub_path":"project/recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5768756594","text":"import os\nimport math\n\n# screen\nSCREEN = WIDTH, HEIGTH = 1280, 768\nCENTER = H_WIDTH, H_HEIGHT = WIDTH // 2, HEIGTH // 2\n\n# player\nPLAYER_SPEED = 0.09\nPLAYER_ROT_SPEED = 0.0008\nPLAYER_RELOAD_TIME = 700\n\n# bullet\nBULLET_SPEED = 0.3\n\n# map tiles\nTILE_W, TILE_H = 128, 128\n\n# map\nMAP_WIDTH, MAP_HEIGHT = WIDTH // TILE_W, HEIGTH // TILE_H\n\n# fps\nFPS = 60\n\n# colors\nBG = (20, 20, 20)\n\n# paths\nDIR = os.path.dirname(__file__)\nDATA = f'{DIR}/data'\nASSETS = f'{DATA}/assets'\nTANKS_DIR = f'{ASSETS}/tanks'\nBULLETS_DIR = f'{ASSETS}/bullets'\nENVIRONMENT_DIR = f'{ASSETS}/environment'\nPARTICLES_DIR = f'{ASSETS}/particles'\n\n# tanks\nTANK_SPEED = PLAYER_SPEED\nTANK_ROT_SPEED = PLAYER_ROT_SPEED\n\n# enemy\nLEFT_EDGE = math.pi - math.radians(1)\nRIGHT_EDGE = math.pi + math.radians(1)\nENEMY_RELOAD_TIME = 3000\n\n# math\nHALF_PI = math.pi / 2","repo_name":"ROUVELL/Tanks","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20186147759","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport os\n\nA = 12\nf = 50\nt = np.linspace(0, 2/f, 1000)\nplt.plot(t, np.abs(A*np.sin(2*np.pi*f*t)))\nplt.grid()\nplt.xlabel('t')\nplt.ylabel('x(t)')\nplt.savefig('../figs/1_1.png')\nos.system('sh gopen.sh ../figs/1_1.png')\n","repo_name":"gadepall/signal-processing","sub_path":"ee3900-assignments/charger/codes/1_1.py","file_name":"1_1.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11618987426","text":"# -*- coding: utf-8 -*-\n#\n# Sphinx documentation build configuration file\n\nimport re\nimport sphinx\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',\n 'sphinx.ext.autosummary', 'sphinx.ext.extlinks',\n 'sphinx.ext.intersphinx']\n\nmaster_doc = 'index'\n#templates_path = ['_templates']\nexclude_patterns = ['_build']\n\nautodoc_default_flags = ['members', 'undoc-members',\n 'show-inheritance', 'synopsis']\n\nproject = 'lom2mlr'\ncopyright = u'2012, GTN-Qu\\u00e9bec'\n#version = lom2mlr.__released__\nversion = \"0.1\"\nrelease = version\nshow_authors = True\n\nhtml_theme = 'sphinxdoc'\nmodindex_common_prefix = ['lom2mlr.']\nhtml_static_path = ['_static']\nhtml_sidebars = {} # 'index': ['indexsidebar.html', 'searchbox.html']}\n# html_additional_pages = {'index': 'index.rst'}\nhtml_use_opensearch = 'https://github.com/GTN-Quebec/lom2mlr'\n\nhtmlhelp_basename = 'lom2mlr'\n\nepub_theme = 'epub'\nepub_basename = 'lom2mlr'\nepub_author = 'Marc-Antoine Parent'\nepub_publisher = 'https://gtn-quebec.org/'\nepub_scheme = 'url'\nepub_identifier = 
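The edit_recipe view in the Django record above copies each non-empty field through a long if/elif chain; setattr collapses that to one loop. A stand-alone sketch (Recipe here is a stub, not the project's model):

def apply_updates(obj, updates):
    for field, value in updates.items():
        if value not in ('', None):   # keep the record's "only overwrite non-empty" rule
            setattr(obj, field, value)
    return obj

class Recipe:  # minimal stand-in for the Django model
    title = 'old'
    products = 'old'

r = apply_updates(Recipe(), {'title': 'new', 'products': ''})
print(r.title, r.products)  # -> new old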
epub_publisher\nepub_pre_files = [('index.html', 'Welcome')]\nepub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',\n '_static/jquery.js', '_static/searchtools.js',\n '_static/underscore.js', '_static/basic.css',\n 'search.html']\n\nlatex_documents = [('contents', 'lom2mlr.tex', 'Lom2MLR Documentation',\n 'Marc-Antoine Parent', 'manual', 1)]\n#latex_logo = '_static/lom2mlr.png'\nlatex_elements = {\n 'fontpkg': '\\\\usepackage{palatino}',\n}\nlatex_show_urls = 'footnote'\n\nautodoc_member_order = 'groupwise'\ntodo_include_todos = True\n\nextlinks = {\n 'rdflib_api': ('https://rdflib.readthedocs.org/en/latest/_static/api/'\n 'index.html#%s', ''),\n 'markdown-ext': ('http://packages.python.org/Markdown/extensions/api.html#%s', ''),\n 'etree': ('http://effbot.org/zone/element.htm#%s', ''),\n 'lxml-func': ('http://lxml.de/api/lxml.etree-module.html#%s', 'lxml.etree.'),\n 'lxml-class': ('http://lxml.de/api/lxml.etree.%s-class.html', 'lxml.etree.'),\n 'vobject-func': ('http://vobject.skyhouseconsulting.com/epydoc/public/vobject.vobject-module.html#%s', 'vobject.vobject.'),\n 'vobject-class': ('http://vobject.skyhouseconsulting.com/epydoc/public/vobject.vobject.%s-class.html', 'vobject.vobject.'),\n 'vcard-func': ('http://vobject.skyhouseconsulting.com/epydoc/public/vobject.vcard-module.html#%s', 'vobject.vcard.'),\n 'vcard-class': ('http://vobject.skyhouseconsulting.com/epydoc/public/vobject.vcard.%s-class.html', 'vobject.vcard.'),\n }\n\nman_pages = [\n ('contents', 'lom2mlr-all', 'lom2mlr transformation engine',\n 'Marc-Antoine Parent', 1),\n]\n\ntexinfo_documents = [\n ('contents', 'lom2mlr', 'lom2mlr Documentation', 'Marc-Antoine Parent',\n 'lom2mlr', 'The lom2mlr transformation engine.', 'Documentation tools',\n 1),\n]\n\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/2.7/', None),\n 'rdflib': ('http://rdflib.readthedocs.org/en/latest/', None)}\n\n\n# -- Extension interface ------------------------------------------------------\n\nfrom sphinx import addnodes\n\n\nevent_sig_re = re.compile(r'([a-zA-Z-]+)\\s*\\((.*)\\)')\n\n\ndef parse_event(env, sig, signode):\n m = event_sig_re.match(sig)\n if not m:\n signode += addnodes.desc_name(sig, sig)\n return sig\n name, args = m.groups()\n signode += addnodes.desc_name(name, name)\n plist = addnodes.desc_parameterlist()\n for arg in args.split(','):\n arg = arg.strip()\n plist += addnodes.desc_parameter(arg, arg)\n signode += plist\n return name\n\n\ndef setup(app):\n from sphinx.ext.autodoc import cut_lines\n from sphinx.util.docfields import GroupedField\n #app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))\n app.add_object_type('confval', 'confval',\n objname='configuration value',\n indextemplate='pair: %s; configuration value')\n fdesc = GroupedField('parameter', label='Parameters',\n names=['param'], can_collapse=True)\n app.add_object_type('event', 'event', 'pair: %s; event', parse_event,\n doc_field_types=[fdesc])\n","repo_name":"GTN-Quebec/lom2mlr","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9816641690","text":"import tensorflow as tf\r\nimport emnist\r\n\r\nx_train, y_train = emnist.extract_training_samples(\"byclass\")\r\nx_test, y_test = emnist.extract_test_samples(\"byclass\")\r\n\r\nx_train = x_train.astype('float32') / 255.0\r\nx_test = x_test.astype('float32') / 255.0\r\n\r\ny_train = tf.keras.utils.to_categorical(y_train, 
num_classes=62)\r\ny_test = tf.keras.utils.to_categorical(y_test, num_classes=62)\r\n\r\nmodel = tf.keras.models.Sequential()\r\nmodel.add(tf.keras.layers.Flatten(input_shape=(28, 28))) # Input layer\r\nmodel.add(tf.keras.layers.Dense(units=62, activation='softmax')) # Output layer\r\nmodel.compile(optimizer=\"adam\",\r\n loss=\"categorical_crossentropy\",\r\n metrics=[\"accuracy\"])\r\n\r\nbatch_size = 60\r\nepochs = 10\r\nmodel.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2)\r\n\r\ntest_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=2)\r\nprint(f\"Test Accuracy: {test_accuracy*100:.2f}%\")\r\n","repo_name":"kitchensink20/AI-labs","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73038521333","text":"# This file is part of ranger, the console file manager.\n# License: GNU GPL version 3, see the file \"AUTHORS\" for details.\n\n\"\"\"The main function responsible to initialize the FM object and stuff.\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nfrom logging import getLogger\nimport locale\nimport os.path\nimport sys\nimport tempfile\n\nfrom ranger import VERSION\n\n\nLOG = getLogger(__name__)\n\nVERSION_MSG = [\n 'ranger version: {0}'.format(VERSION),\n 'Python version: {0}'.format(' '.join(line.strip() for line in sys.version.splitlines())),\n 'Locale: {0}'.format('.'.join(str(s) for s in locale.getlocale())),\n]\n\n\ndef main(\n # pylint: disable=too-many-locals,too-many-return-statements\n # pylint: disable=too-many-branches,too-many-statements\n):\n \"\"\"initialize objects and run the filemanager\"\"\"\n import ranger.api\n from ranger.container.settings import Settings\n from ranger.core.shared import FileManagerAware, SettingsAware\n from ranger.core.fm import FM\n from ranger.ext.logutils import setup_logging\n from ranger.ext.openstruct import OpenStruct\n\n ranger.args = args = parse_arguments()\n ranger.arg = OpenStruct(args.__dict__) # COMPAT\n setup_logging(debug=args.debug, logfile=args.logfile)\n\n for line in VERSION_MSG:\n LOG.info(line)\n LOG.info('Process ID: %s', os.getpid())\n\n try:\n locale.setlocale(locale.LC_ALL, '')\n except locale.Error:\n print(\"Warning: Unable to set locale. 
Expect encoding problems.\")\n\n # so that programs can know that ranger spawned them:\n level = 'RANGER_LEVEL'\n if level in os.environ and os.environ[level].isdigit():\n os.environ[level] = str(int(os.environ[level]) + 1)\n else:\n os.environ[level] = '1'\n\n if 'SHELL' not in os.environ:\n os.environ['SHELL'] = 'sh'\n\n LOG.debug(\"cache dir: '%s'\", args.cachedir)\n LOG.debug(\"config dir: '%s'\", args.confdir)\n LOG.debug(\"data dir: '%s'\", args.datadir)\n\n if args.copy_config is not None:\n fm = FM()\n fm.copy_config_files(args.copy_config)\n return 0\n if args.list_tagged_files:\n fm = FM()\n try:\n if sys.version_info[0] >= 3:\n fobj = open(fm.datapath('tagged'), 'r', errors='replace')\n else:\n fobj = open(fm.datapath('tagged'), 'r')\n except OSError:\n pass\n else:\n for line in fobj.readlines():\n if len(line) > 2 and line[1] == ':':\n if line[0] in args.list_tagged_files:\n sys.stdout.write(line[2:])\n elif line and '*' in args.list_tagged_files:\n sys.stdout.write(line)\n return 0\n\n SettingsAware.settings_set(Settings())\n\n if args.selectfile:\n args.selectfile = os.path.abspath(args.selectfile)\n args.paths.insert(0, os.path.dirname(args.selectfile))\n\n paths = args.paths or ['.']\n paths_inaccessible = []\n for path in paths:\n try:\n path_abs = os.path.abspath(path)\n except OSError:\n paths_inaccessible += [path]\n continue\n if not os.access(path_abs, os.F_OK):\n paths_inaccessible += [path]\n if paths_inaccessible:\n print(\"Inaccessible paths: %s\" % paths)\n return 1\n\n profile = None\n exit_msg = ''\n exit_code = 0\n try:\n # Initialize objects\n fm = FM(paths=paths)\n FileManagerAware.fm_set(fm)\n load_settings(fm, args.clean)\n\n if args.list_unused_keys:\n from ranger.ext.keybinding_parser import (special_keys,\n reversed_special_keys)\n maps = fm.ui.keymaps['browser']\n for key in sorted(special_keys.values(), key=str):\n if key not in maps:\n print(\"<%s>\" % reversed_special_keys[key])\n for key in range(33, 127):\n if key not in maps:\n print(chr(key))\n return 0\n\n if not sys.stdin.isatty():\n sys.stderr.write(\"Error: Must run ranger from terminal\\n\")\n raise SystemExit(1)\n\n if fm.username == 'root':\n fm.settings.preview_files = False\n fm.settings.use_preview_script = False\n LOG.info(\"Running as root, disabling the file previews.\")\n if not args.debug:\n from ranger.ext import curses_interrupt_handler\n curses_interrupt_handler.install_interrupt_handler()\n\n # Create cache directory\n if fm.settings.preview_images and fm.settings.use_preview_script:\n if not os.path.exists(args.cachedir):\n os.makedirs(args.cachedir)\n # Create data directory\n if not args.clean:\n if not os.path.exists(args.datadir):\n os.makedirs(args.datadir)\n\n # Run the file manager\n fm.initialize()\n ranger.api.hook_init(fm)\n fm.ui.initialize()\n\n if args.selectfile:\n fm.select_file(args.selectfile)\n\n if args.cmd:\n for command in args.cmd:\n fm.execute_console(command)\n\n if ranger.args.profile:\n import cProfile\n import pstats\n ranger.__fm = fm # pylint: disable=protected-access\n profile_file = tempfile.gettempdir() + '/ranger_profile'\n cProfile.run('ranger.__fm.loop()', profile_file)\n profile = pstats.Stats(profile_file, stream=sys.stderr)\n else:\n fm.loop()\n\n except Exception: # pylint: disable=broad-except\n import traceback\n ex_traceback = traceback.format_exc()\n exit_msg += '\\n'.join(VERSION_MSG) + '\\n'\n try:\n exit_msg += \"Current file: {0}\\n\".format(repr(fm.thisfile.path))\n except Exception: # pylint: disable=broad-except\n pass\n 
exit_msg += '''\n{0}\nranger crashed. Please report this traceback at:\nhttps://github.com/ranger/ranger/issues\n'''.format(ex_traceback)\n\n exit_code = 1\n\n except SystemExit as ex:\n if ex.code is not None:\n if not isinstance(ex.code, int):\n exit_msg = ex.code\n exit_code = 1\n else:\n exit_code = ex.code\n\n finally:\n if exit_msg:\n LOG.critical(exit_msg)\n try:\n fm.ui.destroy()\n except (AttributeError, NameError):\n pass\n # If profiler is enabled print the stats\n if ranger.args.profile and profile:\n profile.strip_dirs().sort_stats('cumulative').print_callees()\n # print the exit message if any\n if exit_msg:\n sys.stderr.write(exit_msg)\n return exit_code # pylint: disable=lost-exception\n\n\ndef xdg_path(env_var):\n path = os.environ.get(env_var)\n if path and os.path.isabs(path):\n return os.path.join(path, 'ranger')\n return None\n\n\ndef parse_arguments():\n \"\"\"Parse the program arguments\"\"\"\n from optparse import OptionParser # pylint: disable=deprecated-module\n from ranger import CONFDIR, CACHEDIR, DATADIR, USAGE\n\n parser = OptionParser(usage=USAGE, version=('\\n'.join(VERSION_MSG)))\n\n parser.add_option('-d', '--debug', action='store_true',\n help=\"activate debug mode\")\n parser.add_option('-c', '--clean', action='store_true',\n help=\"don't touch/require any config files. \")\n parser.add_option('--logfile', type='string', metavar='file',\n help=\"log file to use, '-' for stderr\")\n parser.add_option('--cachedir', type='string',\n metavar='dir', default=(xdg_path('XDG_CACHE_HOME') or CACHEDIR),\n help=\"change the cache directory. (%default)\")\n parser.add_option('-r', '--confdir', type='string',\n metavar='dir', default=(xdg_path('XDG_CONFIG_HOME') or CONFDIR),\n help=\"change the configuration directory. (%default)\")\n parser.add_option('--datadir', type='string',\n metavar='dir', default=(xdg_path('XDG_DATA_HOME') or DATADIR),\n help=\"change the data directory. (%default)\")\n parser.add_option('--copy-config', type='string', metavar='which',\n help=\"copy the default configs to the local config directory. \"\n \"Possible values: all, rc, rifle, commands, commands_full, scope\")\n parser.add_option('--choosefile', type='string', metavar='PATH',\n help=\"Makes ranger act like a file chooser. When opening \"\n \"a file, it will quit and write the name of the selected \"\n \"file to PATH.\")\n parser.add_option('--choosefiles', type='string', metavar='PATH',\n help=\"Makes ranger act like a file chooser for multiple files \"\n \"at once. When opening a file, it will quit and write the name \"\n \"of all selected files to PATH.\")\n parser.add_option('--choosedir', type='string', metavar='PATH',\n help=\"Makes ranger act like a directory chooser. When ranger quits\"\n \", it will write the name of the last visited directory to PATH\")\n parser.add_option('--selectfile', type='string', metavar='filepath',\n help=\"Open ranger with supplied file selected.\")\n parser.add_option('--list-unused-keys', action='store_true',\n help=\"List common keys which are not bound to any action.\")\n parser.add_option('--list-tagged-files', type='string', default=None,\n metavar='tag',\n help=\"List all files which are tagged with the given tag, default: *\")\n parser.add_option('--profile', action='store_true',\n help=\"Print statistics of CPU usage on exit.\")\n parser.add_option('--cmd', action='append', type='string', metavar='COMMAND',\n help=\"Execute COMMAND after the configuration has been read. 
\"\n \"Use this option multiple times to run multiple commands.\")\n\n args, positional = parser.parse_args()\n args.paths = positional\n\n def path_init(option):\n argval = args.__dict__[option]\n try:\n path = os.path.realpath(argval)\n except OSError as ex:\n sys.stderr.write(\n '--{0} is not accessible: {1}\\n{2}\\n'.format(option, argval, str(ex)))\n sys.exit(1)\n if os.path.exists(path) and not os.access(path, os.W_OK):\n sys.stderr.write('--{0} is not writable: {1}\\n'.format(option, path))\n sys.exit(1)\n return path\n\n args.cachedir = path_init('cachedir')\n args.confdir = path_init('confdir')\n args.datadir = path_init('datadir')\n if args.choosefile:\n args.choosefile = path_init('choosefile')\n if args.choosefiles:\n args.choosefiles = path_init('choosefiles')\n if args.choosedir:\n args.choosedir = path_init('choosedir')\n\n return args\n\n\nCOMMANDS_EXCLUDE = ['settings', 'notify']\n\n\ndef load_settings( # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n fm, clean):\n from ranger.core.actions import Actions\n import ranger.core.shared\n import ranger.api.commands\n from ranger.config import commands as commands_default\n\n # Load default commands\n fm.commands = ranger.api.commands.CommandContainer()\n include = [name for name in dir(Actions) if name not in COMMANDS_EXCLUDE]\n fm.commands.load_commands_from_object(fm, include)\n fm.commands.load_commands_from_module(commands_default)\n\n if not clean:\n allow_access_to_confdir(ranger.args.confdir, True)\n\n # Load custom commands\n custom_comm_path = fm.confpath('commands.py')\n if os.path.exists(custom_comm_path):\n old_bytecode_setting = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n try:\n import commands as commands_custom\n fm.commands.load_commands_from_module(commands_custom)\n except ImportError as ex:\n LOG.debug(\"Failed to import custom commands from '%s'\", custom_comm_path)\n LOG.exception(ex)\n else:\n LOG.debug(\"Loaded custom commands from '%s'\", custom_comm_path)\n sys.dont_write_bytecode = old_bytecode_setting\n\n allow_access_to_confdir(ranger.args.confdir, False)\n\n # Load rc.conf\n custom_conf = fm.confpath('rc.conf')\n default_conf = fm.relpath('config', 'rc.conf')\n\n if os.environ.get('RANGER_LOAD_DEFAULT_RC', 'TRUE').upper() != 'FALSE':\n fm.source(default_conf)\n if os.access(custom_conf, os.R_OK):\n fm.source(custom_conf)\n\n allow_access_to_confdir(ranger.args.confdir, True)\n\n # XXX Load plugins (experimental)\n plugindir = fm.confpath('plugins')\n try:\n plugin_files = os.listdir(plugindir)\n except OSError:\n LOG.debug('Unable to access plugin directory: %s', plugindir)\n else:\n plugins = [p[:-3] for p in plugin_files\n if p.endswith('.py') and not p.startswith('_')]\n if not os.path.exists(fm.confpath('plugins', '__init__.py')):\n LOG.debug(\"Creating missing '__init__.py' file in plugin folder\")\n fobj = open(fm.confpath('plugins', '__init__.py'), 'w')\n fobj.close()\n\n ranger.fm = fm\n for plugin in sorted(plugins):\n try:\n try:\n # importlib does not exist before python2.7. It's\n # required for loading commands from plugins, so you\n # can't use that feature in python2.6.\n import importlib\n except ImportError:\n module = __import__('plugins', fromlist=[plugin])\n else:\n module = importlib.import_module('plugins.' 
+ plugin)\n fm.commands.load_commands_from_module(module)\n LOG.debug(\"Loaded plugin '%s'\", plugin)\n except Exception as ex: # pylint: disable=broad-except\n ex_msg = \"Error while loading plugin '{0}'\".format(plugin)\n LOG.error(ex_msg)\n LOG.exception(ex)\n fm.notify(ex_msg, bad=True)\n ranger.fm = None\n\n allow_access_to_confdir(ranger.args.confdir, False)\n else:\n fm.source(fm.relpath('config', 'rc.conf'))\n\n\ndef allow_access_to_confdir(confdir, allow):\n from errno import EEXIST\n\n if allow:\n try:\n os.makedirs(confdir)\n except OSError as err:\n if err.errno != EEXIST: # EEXIST means it already exists\n print(\"This configuration directory could not be created:\")\n print(confdir)\n print(\"To run ranger without the need for configuration\")\n print(\"files, use the --clean option.\")\n raise SystemExit\n else:\n LOG.debug(\"Created config directory '%s'\", confdir)\n if confdir not in sys.path:\n sys.path[0:0] = [confdir]\n else:\n if sys.path[0] == confdir:\n del sys.path[0]\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ranger_ranger/ranger-master/ranger/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15401,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"34113143849","text":"\nimport getopt\nimport json\nfrom os import chdir, getcwd, mkdir, path\nimport subprocess\nfrom sys import argv\nfrom typing import List\n\n\nclass Config:\n \"\"\"\n Data class for patch configuration.\n \"\"\"\n def __init__(self,switch,case,operator=-1,variable=-1,constant=-1) -> None:\n self.switch = switch\n self.case = case\n self.operator = operator\n self.variable = variable\n self.constant = constant\n \n def __str__(self) -> str:\n result=f'{self.switch}-{self.case}'\n if self.operator != -1:\n result += f'-{self.operator}'\n if self.variable != -1:\n result += f'-{self.variable}-{self.constant}'\n return result\n\nclass SwitchInfo:\n \"\"\"\n Data class for switch information.\n Includes switch number, patch appearance, file, begin/end line/column.\n \"\"\"\n def __init__(self, switch_num:int,patches:List[str],file_name:str,begin_line:int,end_line:int,begin_column:int,end_column:int) -> None:\n self.switch_num = switch_num\n self.patches = patches\n self.file_name = file_name\n self.begin_line = begin_line\n self.end_line = end_line\n self.begin_column = begin_column\n self.end_column = end_column\n\ndef insert_patch(original_file:str,backup_file:str,begin_line:int,begin_column:int,end_line:int,end_column:int,patch:str):\n \"\"\"\n Insert a patch string into actual source file, and generate patched file.\n original_file: original source file\n backup file: backup file name read from __backup.log\n patch: patch string read from switch-info.json\n \"\"\"\n # line informations may have function declaration for patch generation\n begin_line-=2\n end_line-=2\n with open(backup_file,'r') as file:\n lines=file.readlines()\n\n previous_lines=[]\n post_lines:List[str]=[]\n for i,line in enumerate(lines):\n if i<=begin_line:\n previous_lines.append(line)\n if i>=end_line:\n post_lines.append(line)\n\n first_line:str=previous_lines[-1]\n last_line=post_lines[0]\n previous_first_line=first_line[:begin_column-1]\n if previous_first_line[-1].isalpha():\n previous_first_line=first_line[:begin_column+1]\n last_first_line:str=last_line[end_column+1:]\n if (previous_first_line[-5:]=='else ' or previous_first_line[-4:]=='else') and last_first_line[:2]=='if':\n previous_first_line += '{ '\n # TODO: Find 
finish of next if and add }\n breacket_counter=0\n is_finish_then=False\n is_finish_else=False\n is_finish=False\n has_else=False\n else_counter=0\n for i,c in enumerate(last_first_line):\n if c=='{':\n # Next IfStmt has CompoundStmt and it's start of it\n breacket_counter+=1\n elif not c.isspace() and is_finish_then and not has_else and breacket_counter==0:\n # then finish, no else\n is_finish=True\n last_first_line=last_first_line[:i+1]+'}\\n'+last_first_line[i+1:]\n break\n elif c==';' and breacket_counter==0:\n # Next IfStmt has other statement for then branch\n is_finish_then=True\n elif c=='}':\n breacket_counter-=1\n if has_else and breacket_counter==0:\n # else finish\n is_finish_else=True\n is_finish=True\n last_first_line=last_first_line[:i+1]+'}'+last_first_line[i+1:]\n break\n elif breacket_counter==0:\n is_finish_then=True\n\n elif c=='e' and not has_else and is_finish_then:\n else_counter+=1\n elif c=='l' and else_counter==1:\n else_counter+=1\n elif c=='s' and else_counter==2:\n else_counter+=1\n elif c=='e' and else_counter==3:\n has_else=True\n \n if not is_finish:\n for i,line in enumerate(post_lines[1:]):\n for j,c in enumerate(line):\n if c=='{':\n # Next IfStmt has CompoundStmt and it's start of it\n breacket_counter+=1\n elif not c.isspace() and is_finish_then and not has_else and breacket_counter==0:\n # then finish, no else\n is_finish=True\n post_lines[i+1]=line[:j]+'}'+line[j:]\n break\n elif c==';' and breacket_counter==0:\n # Next IfStmt has other statement for then branch\n is_finish_then=True\n elif c=='}':\n breacket_counter-=1\n if has_else and breacket_counter==0:\n # else finish\n is_finish_else=True\n is_finish=True\n post_lines[i+1]=line[:j]+'}'+line[j:]\n break\n elif breacket_counter==0:\n is_finish_then=True\n elif c=='e' and not has_else and is_finish_then:\n else_counter+=1\n elif c=='l' and else_counter==1:\n else_counter+=1\n elif c=='s' and else_counter==2:\n else_counter+=1\n elif c=='e' and else_counter==3:\n has_else=True\n if is_finish:\n break\n\n patch_lines=patch.splitlines()\n # Remove comment\n while patch_lines.count('')>0:\n patch_lines.remove('')\n while patch_lines.count('\\t')>0:\n patch_lines.remove('\\t')\n\n patch_first_line=previous_first_line+ patch_lines[0]\n if len(patch_lines)>1:\n patch_last_line=patch_lines[-1] + last_first_line\n else:\n patch_last_line=last_first_line\n\n del previous_lines[-1]\n del post_lines[0]\n actual_lines=previous_lines+[patch_first_line]+patch_lines[1:-1]+[patch_last_line]+post_lines\n with open('patched_'+original_file,'w') as file:\n for line in actual_lines:\n if line[-1]!='\\n':\n file.write(line+\"\\n\")\n else:\n file.write(line)\n\n return 'patched_'+original_file\n\ndef replace_actual_condition(config:Config,patch:str):\n \"\"\"\n Replace abstract condition with actual condition.\n If abstract condition not found(a.k.a. normal patch), do nothing.\n \"\"\"\n if patch.find('__is_neg')!=-1:\n start_abst_cond=patch.find('__is_neg')\n end_abst_cond=-1\n is_start=False\n counter=0\n for i,char in enumerate(patch):\n if i\n\nGenerate patched source file with patch configuration. Generated source file will be 'patched_'.\n: work directory of program.\nIf -c option is specified, generate diff file with specified patch configuration. Otherwise, generate diff files of all plausible patches.\nOne of -c or -i option should be specified.\n\nOptions:\n -g: generate diff file.\n -h: show help.\n -c : specify patch configuration. 
: -[-[--]].\n -i : result file of MSV-search.\n -o : output directory of diff files. \"\"\")\n exit(0)\n\n if config_str is None and input_file is None:\n print('Patch configuration or result file of MSV-search is required.')\n exit(1)\n\n work_dir=args[0]\n config=[]\n if config_str is not None:\n config_strs=config_str.split('-')\n if len(config_strs)==2:\n config.append(Config(int(config_strs[0]),int(config_strs[1])))\n elif len(config_strs)==3:\n config.append(Config(int(config_strs[0]),int(config_strs[1]),int(config_strs[2])))\n else:\n config.append(Config(int(config_strs[0]),int(config_strs[1]),int(config_strs[2]),int(config_strs[3]),int(config_strs[4])))\n else:\n result_file=open(input_file+'/msv-result.json','r')\n result_root=json.load(result_file)\n result_file.close()\n\n for result in result_root:\n if result['pass_result']:\n cur_config=result['config'][0]\n switch=cur_config['switch']\n case=cur_config['case']\n if cur_config['is_cond']:\n if cur_config['operator']==4:\n config_obj=Config(switch,case,4)\n else:\n config_obj=Config(switch,case,cur_config['operator'],cur_config['variable'],cur_config['constant'])\n else:\n config_obj=Config(switch,case)\n\n config.append(config_obj)\n \n orig_dir=getcwd()\n chdir(work_dir)\n\n info_file=open('switch-info.json','r')\n info=json.load(info_file)\n info_file.close()\n\n switch_list:List[SwitchInfo]=[]\n files=info['rules']\n for file in files:\n for line in file['lines']:\n for switch in line['switches']:\n new_switch=SwitchInfo(switch['switch'],switch['patch_codes'],file['file_name'],switch['begin_line'],switch['end_line'],switch['begin_column'],switch['end_column'])\n switch_list.append(new_switch)\n \n\n for conf in config:\n for switch in switch_list:\n if switch.switch_num==conf.switch:\n current_switch=switch\n\n file_name=current_switch.file_name.split('/')[-1]\n backup_log_file='__backup.log'\n with open(backup_log_file,'r') as file:\n backuped_file=file.readlines()\n for i,file in enumerate(backuped_file):\n if file.strip()==current_switch.file_name:\n backup_index=i\n break\n fixed_file='fixed_'+file_name\n original_file=f'__backup{backup_index}'\n\n patch=current_switch.patches[conf.case-1]\n if patch[-1]!=';' and patch[-1]!='}' and patch[-1]!='\\n':\n patch+=';'\n patch=replace_actual_condition(conf,patch)\n print(f'patch:\\n{patch}')\n patched_file=insert_patch(file_name,original_file,current_switch.begin_line,current_switch.begin_column,current_switch.end_line,current_switch.end_column,patch)\n\n if gen_diff:\n generate_diff(file_name,original_file,current_switch.file_name,conf,output_file)\n\n chdir(orig_dir)","repo_name":"CasinoRepair/SimAPR","sub_path":"SimAPR/diff_gen.py","file_name":"diff_gen.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29112988076","text":"from typing import List\n\n\nclass HostConfiguration:\n def __init__(\n self,\n sshName: str,\n hostName: str,\n hostIP: str,\n hostPort: int,\n numNICPorts: int,\n numNUMAs: int,\n firstCoreIndex: int,\n numCores: int,\n numServers: int,\n numDrivers: int,\n numWorkerList: List[int],\n ) -> None:\n self.sshName = sshName\n self.hostName = hostName\n self.hostIP = hostIP\n self.hostPort = hostPort\n self.numNICPorts = numNICPorts\n self.numNUMAs = numNUMAs\n self.firstCoreIndex = firstCoreIndex\n self.numCores = numCores\n self.numServers = numServers\n self.numDrivers = numDrivers\n\n self.numWorkerList = numWorkerList\n assert 
len(self.numWorkerList) == self.numServers\n\n def __str__(self) -> str:\n return \":\".join(\n [\n self.hostName,\n self.hostIP,\n str(self.hostPort),\n str(self.numNICPorts),\n str(self.numNUMAs),\n str(self.firstCoreIndex),\n str(self.numCores),\n str(self.numServers),\n str(self.numDrivers),\n ]\n )\n\n def printNumWokerList(self) -> str:\n return \" \".join([str(n) for n in self.numWorkerList])\n\n\ndef printHostsStr(hostsConfigurations: List[HostConfiguration]) -> str:\n return \" \".join([str(c) for c in hostsConfigurations])\n\n\ndef printWorkerCount(hostsConfigurations: List[HostConfiguration]) -> str:\n s = \"\"\n for hostConfig in hostsConfigurations:\n if hostConfig.numServers > 0:\n s += hostConfig.printNumWokerList() + \" \"\n return s\n","repo_name":"uwaterloo-mast/malcolm","sub_path":"scripts/HostConfiguration.py","file_name":"HostConfiguration.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33194495402","text":"from unittest import TestCase\nfrom taqtile.recent_runner import RecentRunner\nfrom dmenu import list_executables\n\n\nclass RecentRunnerTest(TestCase):\n def test_list(self):\n rr = RecentRunner(\"qtile_run\", dbpath=\"~/.qtile_run_test.db\")\n self.assertGreater(len(rr.list(list_executables())), 1)\n\n def test_recent_run(self):\n rr = RecentRunner(\"qtile_run\", dbpath=\"~/.qtile_run_test.db\")\n rr.insert(\"testcmd\")\n rr.insert(\"testcmd2\")\n rr.insert(\"testcmd1\")\n rr.insert(\"cmdtest\")\n rr.insert(\"blah\")\n for res in rr.recent(\"cmd\"):\n self.assertTrue(res.startswith(\"cmd\"))\n results = rr.recent(\"test\")\n print(results)\n","repo_name":"jagguli/TAQtile","sub_path":"taqtile/recent_runner_test.py","file_name":"recent_runner_test.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38594237306","text":"import util\nimport importlib.util\nimport mpmath\nimport mpmath_util\nimport matplotlib.pyplot as plt\nimport matplotlib\n\ndef filename(param, a, b, method, MDS, prec, phi, M):\n d = \"inverse_power\" + param + \"_a\" + a + \"_b\" + b + \"_\" + method\n d += \"_MDS\" + str(MDS) + \"_prec\" + str(prec)\n f = method + \"_\" + phi + \"_max_abs_E_M\" + f\"{M:04}\" + \".dat\"\n return d + \"/\" + f\nparam = [\"0.5\", \"1\", \"2\"]\nlog2a = [-1, -10]\nprec = [248, 184]\nymin = [30, 0.7]\nymax = [180, 12]\nyticks_start = [50, 1]\nyticks_stop = [200, 13]\nyticks_step = [50, 2]\na = [str(2**e) for e in log2a]\nb = \"1\"\nmethod = [\"best\", \"gauss\", \"gauss\", \"gauss\", \"gauss\", \"gauss\"]\nphi = [\"P2\", \"Phi\", \"exp\", \"P2\", \"P1\", \"R0_1\"]\ncolor = [\"C0\", \"C1\", \"C2\", \"C3\", \"C4\", \"C5\"]\nmarker = [\"+\", \"x\", \"1\", \"2\", \"3\", \"4\"]\nlinestyle = [\"solid\", \"solid\", \"dotted\", \"dashed\", \"dashdot\", \"dashdot\"]\nMDS_list = []\nMDS_list.append([96, 96, 96, 96, 96, 96])\nMDS_list.append([192, 1536, 1536, 1536, 1536, 1536])\nMmax = 17\n\nutil.print_sys_version()\nmpmath_util.print_version()\n\ndef plot_error_ratio(ax, Er_list, method, phi, r):\n for i in range(len(Er_list)):\n M = range(1, len(Er_list[i]) + 1)\n label = \"best\"\n if method[i] == \"gauss\":\n phii = importlib.import_module(phi[i]).phi(r)\n hatrho2 = mpmath.power(phii.hatrho(), 2)\n latex = phii.latexname(\"a/b\")\n ax.axhline(hatrho2, linewidth = linewidth, linestyle = linestyle[i],\n color = \"k\", label = r\"$\\hat{{\\rho}}[{}]^2$\".format(latex),\n 
zorder = 1)\n label = r\"${}$\".format(latex)\n\n ax.scatter(M, Er_list[i], color = color[i], marker = marker[i], label = label)\n\nmatplotlib.rcParams[\"mathtext.fontset\"] = \"cm\"\nlinewidth = 1\nfont_size = 10\nplt.rc(\"font\", size = font_size)\nwidth = 5\nheight = 6\nplt.figure(figsize = (width, height))\n\nnrows = len(param)\nncols = len(a)\nfor i in range(nrows):\n for j in range(ncols):\n mpmath.mp.prec = prec[j]\n mpmath_util.print_prec()\n Er_list = []\n for k in range(len(phi)):\n fl = [filename(param[i], a[j], b, method[k], MDS_list[j][k],\n prec[j], phi[k], M) for M in range(1, Mmax + 1)]\n E = [mpmath_util.read_vector(f)[0] for f in fl]\n Er = [E[l] / E[l + 1] for l in range(len(E) - 1)]\n Er_list.append(Er)\n\n print(\"E_M/E_{M+1}\", \"param\", param[i], \"a\", a[j], \"b\", b)\n print(\"row:method_phi\", \" \".join([m + \"_\" + p for m, p in zip(method, phi)]))\n print(\"col:M\", \" \".join([str(M) for M in range(1, Mmax)]))\n for Er in Er_list:\n print(\" \".join([mpmath.nstr(Eri).ljust(8) for Eri in Er]))\n\n ax = plt.subplot2grid((nrows, ncols), (i, j), colspan = 1)\n ax.set_ylim(ymin[j], ymax[j])\n r = mpmath.mpf(a[j]) / mpmath.mpf(b)\n plot_error_ratio(ax, Er_list, method, phi, r)\n\n ax.text(0.3, 0.89, r\"$\\eta = {}, a = 2^{{{}}}, b = {}$\".format(\n param[i], log2a[j], b), transform = ax.transAxes)\n xlab = (i == nrows - 1)\n ax.tick_params(labelbottom = xlab)\n if xlab:\n ax.set_xlabel(r\"$M$\")\n ax.set_xticks(range(1, Mmax, 5))\n if j == 0:\n ax.set_ylabel(r\"$E_M/E_{M+1}$\")\n ax.set_yticks(range(yticks_start[j], yticks_stop[j], yticks_step[j]))\n\nhandles, labels = ax.get_legend_handles_labels()\nleg = plt.legend(handles, labels, loc = \"upper right\",\n bbox_to_anchor = (1.04, 3.65), ncol = 4, columnspacing = 1, labelspacing = 0)\nleg.set_in_layout(False)\nplt.tight_layout(pad = 0, rect = (0, 0, 1, 0.87))\nf = __file__.replace(\".py\", \".pdf\")\nplt.savefig(f)\nprint(f)\n\n","repo_name":"ymkoyama/fcmf","sub_path":"fig8.py","file_name":"fig8.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74402596212","text":"import os\nimport torch\nimport torch.nn as nn\nimport Reader\nimport Graph2Coo\nimport GATModel\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch_geometric.data import InMemoryDataset\nimport Evaluation as eval\n\nclass train_model:\n def __init__(self, path, sampling, dimension):\n self.path = path\n self.sampling = sampling\n self.dimension = dimension\n self.data = None\n\n def train_AMNE(self):\n # Load the dataset\n dataset, data_merge, edges_list, edges_label, nodes_attr = self.data_load()\n number_graphs = len(dataset)\n model = GATModel.Net(dataset, number_graphs)\n\n # Set up the objective function and the optimizer\n criterion2 = nn.BCEWithLogitsLoss()\n optimizer = torch.optim.Adam(model.parameters(), weight_decay=0.01, lr=0.001, betas=(0.9,0.999))\n\n pre_acc = 0\n for epoch in range(1, 6001):\n pre_feat, encoder_H, decoder_H, fin_feat, feat_orig = model()\n optimizer.zero_grad()\n object_function1 = 0\n for i in range(number_graphs):\n object_function1 = object_function1 + torch.norm(pre_feat[i]-decoder_H[i], p=2)#torch.norm(encoder_H[i]-decoder_H[i], p=2) #+\n object_function = object_function1 + torch.norm(feat_orig - dataset[0].x, p=2)\n object_function.backward()\n optimizer.step()\n Acc = []\n Adj = []\n for i in range(10):\n accuracy, adjust = eval.link_prediction(fin_feat, edges_list, edges_label, dimensions = 128, GCN=True)\n Acc.append(accuracy)\n 
Adj.append(adjust)\n average_acc = sum(Acc)/10\n average_adj = sum(Adj)/10\n print(\"----Epoch: %d -----Loss = %.4f----Accuracy = %.4f ----ADJ Score = %.4f\"%(epoch, object_function, average_acc, average_adj))\n if pre_acc < average_acc:\n max_accuracy = average_acc\n max_adjust = average_adj\n pre_acc = average_acc\n torch.save(model.state_dict(),'./model/model.pt')\n print(\" ----Max Accuracy : %.4f ---- MAx ADJ Score: %.4f ----\"%(max_accuracy, max_adjust))\n\n def data_load(self):\n path = \"./Sampling_graph/Datasets_With_Attributes/\"+ os.path.basename(self.path) + \".graph\"\n mul_nets, merge_nets, pos_edge_list, neg_edge_list, nodes_attr = Reader.data_load(path)\n mult_graphs = Graph2Coo.graphs2coo(mul_nets)\n graph_list = []\n for g in mult_graphs:\n x = torch.tensor(list(nodes_attr.values()), dtype=torch.float).to('cpu')\n g.x = x\n graph_list.append(g)\n data_merge = Graph2Coo.graphs2coo([merge_nets])[0]\n x = torch.tensor(list(nodes_attr.values()), dtype=torch.float)\n data_merge.x = x\n dataset = CreatMyDataset(graph_list, '../Benchmark/')\n edges_list, labels = get_selected_edges(pos_edge_list, neg_edge_list)\n return dataset, data_merge, edges_list, labels, nodes_attr\n\n def test_modal(self):\n path = './model/model.pt'\n dataset, data_merge, edges_list, edges_label, nodes_attr = self.data_load()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if os.path.exists(path):\n model = GATModel.Net(dataset, device)\n model.load_state_dict(torch.load(path))\n pre_x, normal_x, fin_feat = model.forward()\n for i in range(100):\n accuracy, adjust = eval.link_prediction(fin_feat, edges_list, edges_label)\n print(\" ---- Accuracy : %.4f ---- ADJ Score: %.4f ----\"%(accuracy, adjust))\n else:\n print('The model has saved in this file path!')\n\ndef get_selected_edges(pos_edge_list, neg_edge_list):\n edges = pos_edge_list + neg_edge_list\n labels = np.zeros(len(edges))\n labels[:len(pos_edge_list)] = 1\n return edges, labels\n\nclass CreatMyDataset(InMemoryDataset):\n def __init__(self, dataset, root, transform=None, pre_transform=None):\n self.data_list = dataset\n super(CreatMyDataset, self).__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_file_names(self):\n return []\n @property\n def processed_file_names(self):\n return ['CKM_Pyg.dataset']\n\n def download(self):\n pass\n\n def process(self):\n data_list = self.data_list\n data, slices = self.collate(data_list)\n torch.save((data, slices), self.processed_paths[0])\n","repo_name":"Brian-ning/HMNE","sub_path":"Source/history/AMNE-v0.py","file_name":"AMNE-v0.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42022446113","text":"class Solution:\n # After the potential rotation, array is split into 2 parts and both of them are in an ascending order\n # but they are not sorted in respect to one another.\n #\n # [5,6,7,1,2,3,4]\n # After choosing the middle index there are 3 different possibilities:\n # 1) Either mid is fully in left ascending sub array\n # 2) Either mid is fully in the right ascending sub array\n # 3) Either mid is on the edge of left and right asecnding sub arrays\n # In all of these cases we should act accordingly.\n def search(self, nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) - 1\n\n while left <= right:\n mid = (left + right) // 2\n if nums[mid] > nums[-1]:\n left = mid + 1\n else:\n right 
= mid - 1\n\n def bsearch(start, end):\n l = start\n r = end\n while l <= r:\n m = (l + r) // 2\n if nums[m] == target:\n return m\n elif nums[m] < target:\n l = m + 1\n else:\n r = m - 1\n return -1\n\n res = bsearch(0, left - 1)\n if res != -1:\n return res\n res = bsearch(left, len(nums) - 1)\n return res","repo_name":"nikasakandelidze/algorithms","sub_path":"bsearch_rotated_arr/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40571478855","text":"import numpy as np\nimport pandas as pd\nimport os\nimport sys\n\npd.set_option('display.max_rows', 500)\n\n##### COPY__PASTE__LIB__BEGIN #####\n\nbasepath = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])) + '/..')\nsys.path.append(basepath)\nfrom edgar_playground.t5_lib import *\n\n##### COPY__PASTE__LIB__END #####\n\nINPUT_DIR = '../input'\n# INPUT_DIR = '../work/subsample_5000'\n\n# FEATURE_DIR = '.'\nFEATURE_DIR = '../feature/t5'\n\n# WORK_DIR= '.'\nWORK_DIR = '../work/t5'\n\n# OUTPUT_DIR = '.'\nOUTPUT_DIR = '../work/t5'\n\n# TYPE_WL = ['2JHH', '3JHH', '1JHC', '2JHC', '3JHC', '1JHN', '2JHN', '3JHN', ]\n\n# TARGET_WL = ['fc', 'sd', 'pso', 'dso']\n\nSEED = 55\nnp.random.seed(SEED)\n\n\n# train, test, structures, contributions = t5_load_data(INPUT_DIR)\n#\n# train, test = t5_load_feature_criskiev(FEATURE_DIR, train, test)\n#\n# structures = t5_merge_yukawa(INPUT_DIR, structures)\n#\n# structures = t5_load_feature_crane(FEATURE_DIR, structures)\n#\n# train, test = t5_merge_structures(train, test, structures)\n#\n# t5_distance_feature(train, test)\n#\n# train, test = t5_load_feature_artgor(FEATURE_DIR, train, test)\n#\n# train, test = t5_load_feature_giba(FEATURE_DIR, train, test)\n\n#\n# Save to and/or load from parquet\n#\n# t5_to_parquet(WORK_DIR, train, test, structures, contributions)\n\ntrain, test, structures, contributions = t5_read_parquet(WORK_DIR)\ndisp_mem_usage()\nprint(train.shape)\nprint(test.shape)\n\ndesc = train.describe()\ndesc.to_csv(f'{WORK_DIR}/t5_describe_train.csv', index=False)\ndesc.to_parquet(f'{WORK_DIR}/t5_describe_train.parquet', index=False)\n\ndesc = test.describe()\ndesc.to_csv(f'{WORK_DIR}/t5_describe_test.csv', index=False)\ndesc.to_parquet(f'{WORK_DIR}/t5_describe_test.parquet', index=False)\n\ncorr = train.corr()\ncorr.to_csv(f'{WORK_DIR}/t5_correlation_train.csv', index=False)\ncorr.to_parquet(f'{WORK_DIR}/t5_correlation_train.parquet', index=False)\n\ncorr = test.corr()\ncorr.to_csv(f'{WORK_DIR}/t5_correlation_test.csv', index=False)\ncorr.to_parquet(f'{WORK_DIR}/t5_correlation_test.parquet', index=False)\n\ntrain.fillna(-10000)\ntest.fillna(-10000)\n\ncorr = train.corr()\ncorr.to_csv(f'{WORK_DIR}/t5_correlation_fillna_train.csv', index=False)\ncorr.to_parquet(f'{WORK_DIR}/t5_correlation_fillna_train.parquet', index=False)\n\ncorr = test.corr()\ncorr.to_csv(f'{WORK_DIR}/t5_correlation_fillna_test.csv', index=False)\ncorr.to_parquet(f'{WORK_DIR}/t5_correlation_fillna_test.parquet', index=False)\n\n\nexit(0)\n\n#\n# Edike :)\n#\ntrain, test = t5_load_feature_edgar(FEATURE_DIR, train, test)\n\n#\n# Load Phase 1. 
OOF data Mulliken charge\n#\ntrain, test = t5_load_data_mulliken_oof(WORK_DIR, train, test)\n\n#\n# Merge contributions fact data\n#\ntrain = t5_merge_contributions(train, contributions)\n\n#\n# Predict contributions\n#\nfor c in train.columns: print(c)\n\nextra_cols = []\nextra_cols += ['mulliken_charge_0', 'mulliken_charge_1']\nextra_cols += ['qcut_subtype_0', 'qcut_subtype_1', 'qcut_subtype_2']\nX, X_test, labels = t5_prepare_columns(train, test, good_columns_extra=extra_cols)\n# t5_do_predict(train, test, TYPE_WL, TARGET_WL, PARAMS, N_FOLD, N_ESTIMATORS, SEED, X, X_test, labels, OUTPUT_DIR,\n# 't5b_contributions_train.csv', 't5b_contributions_test.csv')\n","repo_name":"EdgarPE/champs-scalar-coupling","sub_path":"edgar/t5_tool_correlation.py","file_name":"t5_tool_correlation.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14846995317","text":"import csv\nimport codecs\nimport random\nimport numpy as np\nimport analysis as an\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport os\nimport sys\nif hasattr(sys, 'frozen'):\n os.environ['PATH'] = sys._MEIPASS + \";\" + os.environ['PATH']\nfrom mainUI import Ui_MainWindow\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QDialog, QApplication, QMainWindow\n#myfont = fm.FontProperties(fname='C:/Windows/Fonts/msyh.ttc')\n\nclass App():\n Data = []\n\n def read_Csv(self,file):\n with codecs.open(file, 'r','utf-8-sig') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reversed(list(reader)):\n #print(row)\n self.Data.append(row)\n\n\nclass Figure_Canvas(FigureCanvas): \n cmap = plt.get_cmap('Spectral')\n #colors = [cmap(i) for i in np.linspace(0, 1, 8)]\n \n color = ['#2964BA', '#F0FFBD', '#F2C029', '#6ECC7B', '#FF8335', '#3FC211',\n '#6E94CC', '#FFADA2', '#B82240', '#C0A3FF', '#88F3FE', '#FF8AC4', '#A53CC2']\n \n def __init__(self, parent=None, width=8.5, height=5, dpi=100):\n #fig = Figure(figsize=(width, height), dpi=100) \n fig = plt.figure(figsize=(width, height), dpi=100)\n plt.style.use('seaborn-whitegrid')\n\n FigureCanvas.__init__(self, fig) \n self.setParent(parent)\n self.axes = fig.add_subplot(111)\n\n def func(self,pct,sizelist):\n absolute = int(pct/100.*np.sum(sizelist))\n return \"{:.1f}%\\n({:d})\".format(pct, absolute)\n\n def chart_bar(self,labellist,sizelist,title):\n x = labellist\n y = sizelist\n width = 0.5\n self.axes.bar(x,y,width,align=\"center\")\n self.axes.set_xticks(x)\n self.axes.set_xticklabels(x ,rotation=60)\n self.axes.set_title(title)\n \n def chart_pie(self,labellist,sizelist,title):\n labels = labellist\n sizes = sizelist\n\n self.axes.pie(sizes, labels=labels, autopct=lambda pct: self.func(pct,sizes),shadow=False, startangle=90, pctdistance=0.75, colors = self.color)\n self.axes.axis('equal')\n self.axes.set_title(title)\n self.axes.legend(title=\"Months\",loc=\"center left\",bbox_to_anchor=(0.9,0.5))\n\n def chart_donut(self,labellist,sizelist,title):\n labels = labellist\n sizes = sizelist\n \n wedges = self.axes.pie(sizes, wedgeprops=dict(width=0.5), autopct='%1.1f%%', startangle=160, pctdistance=0.8, colors = self.color)\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props, zorder=0, va=\"center\")\n\n for i,p in enumerate(wedges[0]):\n ang = 
(p.theta2 - p.theta1)/2. + p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n self.axes.annotate(\"{label} ({size})\".format(label=labels[i],size=sizes[i]), xy=(x, y), xytext=(1.4*np.sign(x), 1.4*y),\n horizontalalignment=horizontalalignment, **kw)\n\n self.axes.set_title(title,y=0.425)\n\n def chart_plot(self,labellist,sizelist,title):\n self.axes.plot(labellist,sizelist)\n \n total = 0\n for s in sizelist:\n total += s\n\n avg = total / len(sizelist)\n avglist = []\n for i in range(len(sizelist)):\n avglist.append(avg)\n\n avglegend = 'avg: ' + str(int(avglist[0]))\n\n self.axes.plot(labellist,sizelist)\n self.axes.plot(labellist,avglist,label=avglegend,linestyle='--')\n\n x = labellist\n y = sizelist\n width = 0.2\n self.axes.legend(loc='upper right', borderaxespad=2)\n self.axes.bar(x,y,width,align=\"center\")\n self.axes.set_xticks(x)\n self.axes.set_xticklabels(x, rotation=60)\n for i, val in enumerate(sizelist):\n self.axes.text(i,val, val, horizontalalignment='center',verticalalignment='bottom', fontdict={'fontweight':500, 'size':8})\n\n self.axes.set_title(title)\n\nclass AppWindow(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self) \n\n def eventBrowseData(self):\n data = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File',os.getcwd(),\"All Files(*csv *xls)\")\n if data:\n self.ui.pathlineEdit.setText(data[0])\n App.read_Csv(App,data[0])\n self.drawChart_ovDate()\n self.drawChart_ovCate()\n self.addTable_list()\n \n def drawChart_ovDate(self):\n dir = an.getalldateAmount_month(App.Data)\n labellist = []\n sizelist = []\n for key, value in dir.items():\n labellist.append(key)\n sizelist.append(value)\n\n dr_ovDate1 = Figure_Canvas()\n dr_ovDate1.chart_pie(labellist, sizelist,'hi\\n')\n dr_ovDate2 = Figure_Canvas()\n dr_ovDate2.chart_plot(labellist,sizelist,'ff\\n')\n dr_ovDate3 = Figure_Canvas()\n dr_ovDate3.chart_bar(labellist,sizelist,'ff\\n')\n \n graphicscene = QtWidgets.QGraphicsScene()\n graphicscene.addWidget(dr_ovDate1)\n graphicscene2 = QtWidgets.QGraphicsScene()\n graphicscene2.addWidget(dr_ovDate2)\n graphicscene3 = QtWidgets.QGraphicsScene()\n graphicscene3.addWidget(dr_ovDate3)\n self.ui.graphicsView_ovDate1.setScene(graphicscene)\n self.ui.graphicsView_ovDate1.show()\n self.ui.graphicsView_ovDate2.setScene(graphicscene2)\n self.ui.graphicsView_ovDate2.show()\n self.ui.graphicsView_ovDate3.setScene(graphicscene3)\n self.ui.graphicsView_ovDate3.show()\n \n def drawChart_ovCate(self):\n # tab 2\n dir = an.getallCategoryAmount(App.Data)\n labellist = []\n sizelist = []\n labellist_sa = []\n sizelist_sa = []\n total = 0\n total_sa = 0\n\n for v in dir.values():\n total += v\n\n for key, value in dir.items():\n if value < total *0.012:\n labellist_sa.append(key)\n sizelist_sa.append(value)\n total_sa += value\n else:\n labellist.append(key)\n sizelist.append(value)\n\n if total_sa > 0:\n labellist.append('Small Amounts')\n sizelist.append(total_sa)\n\n title_total = 'Total\\n%i'%(total)\n title_sa = 'Small Amounts\\n%i'%(total_sa)\n dr_ovCate1 = Figure_Canvas()\n dr_ovCate1.chart_donut(labellist,sizelist,title_total)\n dr_ovCate2 = Figure_Canvas()\n dr_ovCate2.chart_donut(labellist_sa,sizelist_sa,title_sa)\n\n graphicscene = QtWidgets.QGraphicsScene()\n 
graphicscene.addWidget(dr_ovCate1)\n graphicscene2 = QtWidgets.QGraphicsScene()\n graphicscene2.addWidget(dr_ovCate2)\n self.ui.graphicsView_ovCate1.setScene(graphicscene)\n self.ui.graphicsView_ovCate1.show()\n self.ui.graphicsView_ovCate2.setScene(graphicscene2)\n self.ui.graphicsView_ovCate2.show()\n\n def addTable_list(self):\n # tab 3\n Data_keys = list(App.Data[0].keys())\n \n # columns\n self.ui.tableWidget_list.setColumnCount(len(Data_keys))\n self.ui.tableWidget_list.setHorizontalHeaderLabels(Data_keys)\n\n # rows\n self.ui.tableWidget_list.setRowCount(len(App.Data)-1)\n\n for x in range(self.ui.tableWidget_list.rowCount()):\n if x == 0:\n pass\n dir = App.Data[x]\n for y in range(self.ui.tableWidget_list.columnCount()):\n key = Data_keys[y]\n c = dir.get(key)\n self.ui.tableWidget_list.setItem(x,y,QtWidgets.QTableWidgetItem(c))\n \n self.ui.tableWidget_list.resizeColumnsToContents()\n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = AppWindow()\n win.show()\n sys.exit(app.exec_())\n","repo_name":"agin0634/bokk","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9146600182","text":"'''\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see .\n#####\nIsmail A Ahmed\nPaddle\nVersion 2.0\n'''\n\nimport pygame\nimport sys\nimport random\n\nglobal sped\nsped = 4\n\nbackground = (0, 0, 0)\nentity_color = (255, 255, 255)\n\ndef doRectsOverlap(rect1, rect2): #checks to see if the first rect collides coordinates with the second\n for a, b in [(rect1, rect2), (rect2, rect1)]:\n # Check if a's corners are inside b\n if ((isPointInsideRect(a.left, a.top, b)) or\n (isPointInsideRect(a.left, a.bottom, b)) or\n (isPointInsideRect(a.right, a.top, b)) or\n (isPointInsideRect(a.right, a.bottom, b))):\n return True\n\n return False\n\ndef isPointInsideRect(x, y, rect): #checks to see if the point is inside the rect\n if (x > rect.left) and (x < rect.right) and (y > rect.top) and (y < rect.bottom):\n return True\n else:\n return False\n\n\nclass Entity(pygame.sprite.Sprite):\n \"\"\"Inherited by any object in the game.\"\"\"\n\n def __init__(self, x, y, width, height):\n pygame.sprite.Sprite.__init__(self)\n\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n # This makes a rectangle around the entity, used for anything\n # from collision to moving around.\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n\n\nclass Paddle(Entity):\n \"\"\"\n Player controlled or AI controlled, main interaction with\n the game\n \"\"\"\n\n def __init__(self, x, y, width, height):\n try:\n super(Paddle, self).__init__(x, y, width, height)\n\n self.image = pygame.Surface([self.width, self.height])\n self.image.fill(entity_color)\n except:\n pass\n\n\nclass Player(Paddle):\n \"\"\"The player controlled Paddle\"\"\"\n\n def __init__(self, x, y, width, height):\n super(Player, self).__init__(x, y, width, height)\n\n # How many pixels the Player Paddle should move on a given frame.\n self.y_change = 0\n # How many pixels the paddle should move each frame a key is pressed.\n self.y_dist = 5\n self.newy=250\n\n def getheight(self):\n return self.height\n\n def MoveKeyDown(self, key):\n \"\"\"Responds to a key-down event and moves accordingly\"\"\"\n if (key == pygame.K_UP):\n self.y_change += -self.y_dist\n self.newy+= -self.y_dist\n\n elif (key == pygame.K_DOWN):\n self.y_change += self.y_dist\n self.newy += self.y_dist\n\n def MoveKeyUp(self, key):\n \"\"\"Responds to a key-up event and stops movement accordingly\"\"\"\n if (key == pygame.K_UP):\n self.y_change += self.y_dist\n self.newy += self.y_dist\n\n elif (key == pygame.K_DOWN):\n self.y_change += -self.y_dist\n self.newy += -self.y_dist\n\n def update(self):\n \"\"\"\n Moves the paddle while ensuring it stays in bounds\n \"\"\"\n # Moves it relative to its current location.\n self.rect.move_ip(0, self.y_change)\n # If the paddle moves off the screen, put it back on.\n if self.rect.y < 0:\n self.rect.y = 0\n elif self.rect.y > window_height - self.height:\n self.rect.y = window_height - self.height\n def heightupdate(self,height2):\n self.height=height2\n super(Player, self).__init__(self.x, self.rect.y, self.width, self.height)\n\n\nclass Enemy(Paddle):\n \"\"\"\n AI controlled paddle, simply moves towards the ball\n and nothing else.\n \"\"\"\n\n def __init__(self, x, y, width, height):\n super(Enemy, self).__init__(x, y, width, height)\n\n self.y_change = 4\n\n def update(self):\n \"\"\"\n Moves the Paddle while ensuring it stays in bounds\n \"\"\"\n # Moves the Paddle up if the ball is above,\n # and down if below.\n if ball.rect.y < self.rect.y:\n self.rect.y -= self.y_change\n elif ball.rect.y > self.rect.y:\n self.rect.y += self.y_change\n\n # The paddle 
can never go above the window since it follows\n        # the ball, but this keeps it from going under.\n        if self.rect.y + self.height > window_height:\n            self.rect.y = window_height - self.height\n\n\nclass Ball(Entity):\n    \"\"\"\n    The ball! Moves around the screen.\n    \"\"\"\n\n    def __init__(self, x, y, width, height):\n        super(Ball, self).__init__(x, y, width, height)\n\n        self.image = pygame.Surface([width, height])\n        self.image.fill(entity_color)\n\n        directions = [1, -1]  # pick a random direction on each axis\n        self.x_direction = random.choice(directions)\n        # Positive = down, negative = up\n        self.y_direction = random.choice(directions)\n        # Current speed.\n        self.speed = 4\n\n    def update(self):\n        # Move the ball!\n        global Pcount\n        global Ecount\n\n        self.rect.move_ip(self.speed * self.x_direction,\n                          self.speed * self.y_direction)\n        # Keep the ball in bounds, and make it bounce off the top and bottom.\n        directions = [1, -1]\n\n        if self.rect.y < 0:\n            self.y_direction *= -1\n\n        elif self.rect.y > window_height - 20:  # 20 is the ball height\n            self.y_direction *= -1\n\n        if self.rect.x < 0:\n            self.x_direction *= -1  # send it back the other way\n            Ecount += 1  # the ball got past the player paddle, so the enemy scores\n            self.speed = 4  # reset speed back to the original value\n            self.rect.x = 350  # x center of the window\n            self.rect.y = 200  # y center of the window\n            self.x_direction = random.choice(directions)\n            # Positive = down, negative = up\n            self.y_direction = random.choice(directions)\n            player.heightupdate(50)  # reset the paddle height back to the original 50\n\n        elif self.rect.x > window_width - 20:\n            self.x_direction *= -1\n            Pcount += 1  # the ball got past the enemy paddle, so the player scores\n            self.speed = 4\n            self.rect.x = 350\n            self.rect.y = 200\n            self.x_direction = random.choice(directions)\n            # Positive = down, negative = up\n            self.y_direction = random.choice(directions)\n\n\npygame.init()\n\n# scores\nglobal Pcount\nPcount = 0\nglobal Ecount\nEcount = 0\n\nwindow_width = 700\nwindow_height = 400\nscreen = pygame.display.set_mode((window_width, window_height))\n\npygame.display.set_caption(\"Pong\")\n\nclock = pygame.time.Clock()\n\nglobal firsthalf\nfirsthalf = 0\n\nball = Ball(window_width / 2, window_height / 2, 20, 20)\nplayer = Player(10, window_height / 2, 20, 50)\nenemy = Enemy(window_width - 30, window_height / 2, 20, 50)\n\nall_sprites_list = pygame.sprite.Group()\nall_sprites_list.add(ball)\nall_sprites_list.add(player)\nall_sprites_list.add(enemy)\n\nbasicfont = pygame.font.SysFont(None, 35)  # 35 is the font size, default font\nbasicfont2 = pygame.font.SysFont(None, 35)  # 35 is the font size, default font\nhighscore = pygame.font.SysFont(None, 25)  # 25 is the font size, default font\n\nwhile True:\n    # Event processing here\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            sys.exit()\n        elif event.type == pygame.KEYDOWN:\n            player.MoveKeyDown(event.key)\n        elif event.type == pygame.KEYUP:\n            player.MoveKeyUp(event.key)\n\n    for ent in all_sprites_list:\n        ent.update()\n\n    # check whether the ball and the player paddle collide\n    if doRectsOverlap(ball.rect, player.rect):\n        if ball.rect.y < (player.rect.y + (player.getheight() / 2)):  # top half of the paddle\n            print(\"<25\")\n            ball.x_direction *= -1\n            ball.y_direction *= -1\n            heightvalue = player.getheight()  # current height\n            if heightvalue >= 15:  # make sure the paddle doesn't shrink away to nothing\n                player.heightupdate(heightvalue - 5)  # reduce the height\n\n        elif ball.rect.y >= (player.rect.y + (player.getheight() / 2)):  # bottom half of the paddle\n            print(\">=25\")\n            ball.x_direction *= -1\n            ball.y_direction *= -1\n            heightvalue = player.getheight()  # current height\n            if heightvalue >= 15:  # make sure the paddle doesn't shrink away to nothing\n                player.heightupdate(heightvalue - 5)  # reduce the height\n        if ball.speed < 9:  # increase the speed whenever the ball hits the player paddle\n            ball.speed += 1\n    # check whether the ball and the enemy paddle collide\n    if doRectsOverlap(ball.rect, enemy.rect):\n        if ball.rect.y < (enemy.rect.y + 25):  # top half of the paddle\n            print(\"<25\")\n            ball.x_direction *= -1\n            ball.y_direction *= -1\n\n        elif ball.rect.y >= (enemy.rect.y + 25):  # bottom half of the paddle\n            print(\">=25\")\n            ball.x_direction *= -1\n            ball.y_direction *= -1\n        if ball.speed < 9:  # increase the speed whenever the ball hits the enemy paddle\n            ball.speed += 1\n\n    screen.fill(background)\n    # draw the scores on the GUI\n    text = basicfont.render(\"Player: \" + str(Pcount), True, entity_color, background)  # args: text, antialias, text color, background color\n    screen.blit(text, (50, 10))\n    text2 = basicfont2.render(\"Enemy: \" + str(Ecount), True, entity_color, background)\n    screen.blit(text2, (540, 10))\n\n    if Ecount > 2:  # check to see if the player lost 3 times\n        outfile = open('highscore.txt', 'a')\n        outfile.write(str(Pcount) + '\\n')  # store the player's high score\n        outfile.close()\n        infile = open('highscore.txt', 'r')\n        column = []\n        for line in infile:  # go through each line of the file\n            if line == \"\\n\":\n                continue  # ignore blank lines\n            column.append(int(line))  # add the high score value to the list\n        infile.close()\n        high = sorted(column, reverse=True)  # order the list, biggest to smallest\n        z = high[:10]  # keep only the top ten scores\n\n        text = basicfont.render(\"High Scores: \" + str(z), True, entity_color, background)\n        screen.blit(text, (190, 150))\n\n    all_sprites_list.draw(screen)\n\n    pygame.display.flip()\n\n    if Ecount > 2:  # pause so the high scores stay visible, then reset\n        pygame.time.wait(1000)\n        Pcount = 0  # restart the scores back to zero\n        Ecount = 0\n\n    clock.tick(60)","repo_name":"ismailahmed0/Pong-New","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":11253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8110998681","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 15 13:48:02 2018\r\n\r\n@author: june\r\n\"\"\"\r\n\r\nimport pandas as pd  # high-performance, easy-to-use data structures and data analysis\r\nimport numpy as np  # fundamental package for scientific computing with Python\r\nimport time\r\nimport os\r\nimport fast_model\r\n\r\n_DEBUG = True\r\n\r\nFILL_NA = 'nan_fill'\r\n\r\ndef one_hot_encoder(df, nan_as_category=True):\r\n    # one-hot encode, then drop dummy columns that came out constant zero\r\n    original_columns = list(df.columns)\r\n    df = pd.get_dummies(df, dummy_na=True, drop_first=True)\r\n    new_columns = [c for c in df.columns if c not in original_columns]\r\n    const_columns = [c for c in new_columns if df[c].dtype != 'object' and sum(df[c]) == 0 and np.std(df[c]) == 0]\r\n    df.drop(const_columns, axis=1, inplace=True)\r\n    new_columns = [c for c in new_columns if c not in const_columns]\r\n    return df, new_columns\r\n\r\ndef one_hot_encoder_plus(df, nan_as_category=True):\r\n    # like one_hot_encoder, but also keeps the original categorical columns\r\n    original_columns = list(df.columns)\r\n    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\r\n    df2 = df[categorical_columns]\r\n    df = pd.get_dummies(df, dummy_na=True, drop_first=True)\r\n    new_columns = [c for c in df.columns if c not in original_columns]\r\n    const_columns = [c for c in new_columns if df[c].dtype != 'object' and sum(df[c]) == 0]\r\n    df.drop(const_columns, axis=1, inplace=True)\r\n\r\n    df = pd.concat([df2, df], axis=1)\r\n    new_columns = [c for c in df.columns if c not in original_columns]\r\n    return df, new_columns\r\n\r\nglobal outputfilename\r\noutputfilename = ''\r\nglobal outputfilepath\r\noutputfilepath = ''\r\n\r\ndef setEnvInfo(filepath, filename):\r\n    \"\"\"\r\n    Configure the data file path and file name.\r\n    This must be called before the other methods, as it creates the\r\n    storage location for log output.\r\n    Parameters\r\n    ----------\r\n    filepath : string\r\n        log file path\r\n    filename : string\r\n        log file name\r\n    Output\r\n    -------\r\n    Generates the path filepath/filename/ to store results.\r\n    \"\"\"\r\n    global outputfilename\r\n    global outputfilepath\r\n    outputfilename = filename\r\n    outputfilepath = filepath\r\n    if not os.path.exists(outputfilepath):\r\n        os.mkdir(outputfilepath)\r\n#    outputfilepath = outputfilepath + outputfilename + '/'\r\n#    if not os.path.exists(outputfilepath):\r\n#        os.mkdir(outputfilepath)\r\n\r\ndef _log(*arg, mode):\r\n    global outputfilename\r\n    global outputfilepath\r\n    if outputfilename == '' or outputfilepath == '':\r\n        return\r\n    timeline = time.strftime(\"%Y_%m_%d\", time.localtime())\r\n    with open(outputfilepath+outputfilename+mode+timeline+'.fillna', \"a+\") as text_file:\r\n        print(*arg, file=text_file)\r\n\r\ndef trace(*arg):\r\n    _log(*arg, mode='trace')\r\n\r\ndef debug(*arg):\r\n    if _DEBUG == True:\r\n        _log(*arg, mode='debug')\r\n\r\n\r\ndef process_missing(dataframe, target='', model='tree', method='auto', \\\r\n                    binning_missing_ratio=0.75, neighbour_num=-1):\r\n    \"\"\"\r\n    General API for processing missing data: adds nan status information\r\n    and imputes nan values.\r\n    Main entrance of all internal methods for general purpose processing.\r\n\r\n    Parameters\r\n    ----------\r\n    dataframe : pandas.Dataframe\r\n        dataframe in which to tackle missing values\r\n    target : string\r\n        name of the target feature; it is excluded when processing missing values\r\n    model : options are 'tree', 'regression', 'xgboost'\r\n        model type with which the target will be predicted\r\n        'tree' represents decision trees, which require the result to\r\n        contain no missing data\r\n        'regression' represents all regressions, like logistic regression\r\n        or linear regression, which require the result to contain no\r\n        missing data, no collinearity, and only number-typed features\r\n        'xgboost' represents xgboost and lgbm; these have few constraints\r\n        and can handle missing data\r\n    method : options are 'auto', 'mean', 'knn', 'decision tree', 'bin',\r\n        'bayes', 'lgbm'\r\n        missing value imputation method.\r\n    binning_missing_ratio : float, (0, 1]\r\n        the missing-ratio threshold above which binning is used to treat\r\n        missing values as their own category\r\n    neighbour_num : int, larger than 0 and less than the dataframe size.\r\n        -1 means an auto generated value.\r\n        specifies the KNN neighbour number; only useful when method is\r\n        'auto' or 'knn'\r\n\r\n    Return\r\n    -------\r\n    dataframe after processing missing values in all features except target\r\n    \"\"\"\r\n    if outputfilename != '' or outputfilepath != '':\r\n        dataframe.to_csv(outputfilepath+outputfilename+'.in.csv', index=False)\r\n    dataframe = add_nan_ratio(dataframe)\r\n    dataframe = fill_nan(dataframe, target=target, model=model, method=method, \\\r\n                         binning_missing_ratio=binning_missing_ratio, \\\r\n                         neighbour_num=neighbour_num)\r\n#    dataframe = impute_status(dataframe)\r\n    if outputfilename != '' or outputfilepath != '':\r\n        dataframe.to_csv(outputfilepath+outputfilename+'.out.csv', index=False)\r\n    return dataframe\r\n\r\nNAN_STATUS = '_is_nan'\r\ndef impute_status(dataframe, feature=''):\r\n    \"\"\"\r\n    Add missing-status columns for features of the dataframe\r\n\r\n    Parameters\r\n    ----------\r\n    dataframe : pandas.Dataframe\r\n        dataframe to process\r\n\r\n    feature : string, optional\r\n        name of the feature whose missing status is returned. If not\r\n        specified, missing-status columns are added for all features\r\n        that contain missing values\r\n\r\n    Return\r\n    -------\r\n    dataframe with new missing-status feature(s)\r\n    \"\"\"\r\n    df = dataframe.copy(deep=True)\r\n    nan_num = df.isnull().sum()\r\n#    if feature != '' and nan_num[feature] > 0:\r\n    if feature != '':\r\n        feature_nan = df[feature].isnull()\r\n        return feature_nan\r\n    else:\r\n        for f_ in df.columns:\r\n            if nan_num[f_] > 0:\r\n                df[f_+NAN_STATUS] = df[f_].isnull()\r\n        return df\r\n\r\n\r\ndef fill_nan(dataframe, target='', model='tree', method='auto', binning_missing_ratio=0.7, \\\r\n             neighbour_num=-1):\r\n    \"\"\"\r\n    Fill missing values inside the dataframe with an imputation algorithm\r\n\r\n    Parameters\r\n    ----------\r\n    dataframe : pandas.Dataframe\r\n        dataframe whose missing values will be imputed\r\n\r\n    target : string\r\n        name of the target feature; it is excluded when processing missing values\r\n    model : options are 'tree', 'regression', 'xgboost'\r\n        model type with which the target will be predicted\r\n        'tree' represents decision trees, which require the result to\r\n        contain no missing data\r\n        'regression' represents all regressions, like logistic regression\r\n        or linear regression, which require the result to contain no\r\n        missing data, no collinearity, and only number-typed features\r\n        'xgboost' represents xgboost and lgbm; these have few constraints\r\n        and can handle missing data\r\n\r\n    method : 'auto', 'mean', 'knn', 'decision tree', 'bin', 'bayes', 'lgbm',\r\n        'hardcode'\r\n        'auto' adopts a relatively complex strategy: it tries several\r\n        imputers and keeps the most accurate one.\r\n        'mean' fills the mean value for number features and the most\r\n        common value for category features.\r\n        'knn' fills missing values with the KNN method; neighbour_num\r\n        specifies the nearest-neighbour number.\r\n        'decision tree' fills missing values with a decision tree algorithm.\r\n        'bayes' uses a Bayes method to fill.\r\n        'lgbm' adopts the lgbm method to predict missing values.\r\n        'hardcode' fills nominal features with a missing category and\r\n        fills number features with an outlier value\r\n\r\n    binning_missing_ratio : float, between (0, 1)\r\n        the threshold above which the feature is binned.\r\n\r\n    neighbour_num : int, above 1 and less than the dataframe size\r\n        the neighbour number for KNN. -1 means auto.\r\n\r\n    Return\r\n    -------\r\n    dataframe with filled features after processing\r\n    \"\"\"\r\n    dataframe_int = dataframe.copy(deep=True)\r\n    nan_num = dataframe_int.isnull().sum().sort_values(ascending=True)\r\n    percent = dataframe_int.isnull().sum()/dataframe_int.shape[0]\r\n\r\n    # TODO: impute in loops so later features benefit from already-imputed ones\r\n\r\n    for f_ in nan_num.index:\r\n        if nan_num[f_] == 0:\r\n            continue\r\n\r\n        # binning if the missing ratio exceeds binning_missing_ratio\r\n        trace(f_+' missing ratio: '+str(percent[f_]))\r\n        if percent[f_] > binning_missing_ratio:\r\n            dataframe_int = binning_feature(dataframe_int, f_)\r\n            continue\r\n        # fill nan with hardcode if the value is missing because it cannot exist\r\n        if check_inexistence_nan(dataframe_int, f_, target):\r\n            dataframe_int = impute_hardcode(dataframe_int, f_)\r\n            continue\r\n        # filter out features whose nan status is highly correlated,\r\n        # which would mislead imputation\r\n        df_train = filt_nan_hi_corr_feature(dataframe_int, f_, target)\r\n\r\n        if method == 'auto':\r\n            df, acc = impute_auto(df_train, f_, target)\r\n        elif method == 'mean':\r\n            df, acc = impute_mean(df_train, f_)\r\n        elif method == 'knn':\r\n            df, acc = impute_knn(df_train, f_, target)\r\n        elif method == 'lgbm':\r\n            df, acc = impute_lgbm(df_train, f_, target)\r\n        elif method == 'randomforest':\r\n            df, acc = impute_random_forest(df_train, f_, target)\r\n#        elif method == 'decision tree':\r\n#            df, acc = impute_decisiontree(df_train, f_, target)\r\n#        elif method == 'bayes':\r\n#            df, acc = impute_bayes(df_train, f_, target)\r\n        elif method == 'hardcode':\r\n            df = impute_hardcode(df_train, f_)\r\n        elif method == 'bin':\r\n            df = binning_feature(df_train, f_)\r\n        else:\r\n            raise ValueError('unknown impute method: ' + method)\r\n\r\n        dataframe_int[f_] = df[f_]\r\n\r\n    return dataframe_int\r\n\r\nNAN_RATIO_FEATURE = 'na_ratio'\r\ndef add_nan_ratio(dataframe):\r\n    \"\"\"\r\n    Add one more feature representing the per-row missing ratio.\r\n    If nothing is missing, the new feature is all zeros.\r\n\r\n    Parameters\r\n    ----------\r\n    dataframe : pandas.Dataframe\r\n\r\n    Return\r\n    -------\r\n    dataframe with the missing-ratio feature added\r\n    \"\"\"\r\n    df = dataframe\r\n    # count missing values per row and normalise by the number of columns\r\n    df[NAN_RATIO_FEATURE] = df.isnull().sum(axis=1)/(df.shape[1])*100\r\n    return df\r\n\r\nBINNING_NUM = 100\r\ndef binning_feature(dataframe, feature):\r\n    # hardcode-fill first so missing values land in their own bin\r\n    df = impute_hardcode(dataframe, feature)\r\n    if df[feature].dtype != 'object':\r\n        df[feature] = pd.qcut(df[feature], BINNING_NUM, duplicates='drop')\r\n    return df\r\n\r\n# check if inexistence corr, or inexist & value corr\r\n# if fillna, similar with inexistence corr.\r\nINEXISTENCE_NAN_SYNC_RATIO = 0.5\r\nINEXISTENCE_NAN_CORR_RATIO = 0.98\r\nINEXISTENCE_NAN_NUMBER2NOMINAL_NUM = 30\r\nINEXISTENCE_NAN_NUMBER2NOMINAL_RATIO = 1/10\r\ndef check_inexistence_nan(dataframe, feature, target):\r\n    feature_nan = dataframe[feature].isnull()\r\n#    total_len = dataframe.shape[0]\r\n    feature_nan_len = sum(feature_nan)\r\n    if feature_nan_len == 0:\r\n        return False\r\n\r\n    for feature_test in dataframe.columns:\r\n        if feature_test == feature or feature_test == target:\r\n            continue\r\n\r\n#        if feature_test == 'ELEVATORS_MODE':\r\n#            feature_test = feature_test\r\n\r\n        f_temp = dataframe[feature_test]\r\n        df = pd.concat([feature_nan, f_temp], axis=1)\r\n        df.columns = [feature, feature_test]\r\n#        f_nan = dataframe[feature_test].isnull()\r\n#        df = pd.concat([df, f_nan], axis=1)\r\n#        df.columns = [feature, feature_test, 'feature_test_nan']\r\n        df.dropna(subset=[feature_test], inplace=True)\r\n        feature_nan_len2 = sum(df[feature])\r\n#        if feature_nan_len2/feature_nan_len < INEXISTENCE_NAN_SYNC_RATIO\r\n        if feature_nan_len2 == 0:\r\n            continue\r\n\r\n        # treat a number feature as a category when it behaves like one\r\n        if df[feature_test].dtype != 'object':\r\n            value_count = df[feature_test].value_counts()\r\n            if len(value_count) < INEXISTENCE_NAN_NUMBER2NOMINAL_NUM \\\r\n                    and len(value_count)/df.shape[0] < INEXISTENCE_NAN_NUMBER2NOMINAL_RATIO:\r\n                df[feature_test] = df[feature_test].apply(lambda x: str(x))\r\n                debug(feature+' inexistent with '+feature_test+' value count: '+str(value_count)+' Ratio: '+str(len(value_count)/df.shape[0]))\r\n\r\n        df_feature_nan = df[df[feature]==True]\r\n        # binning number which range contain feature_nan (95%)\r\n        '''\r\n        #TODO consider more\r\n        if df[feature_test].dtype != 'object':\r\n            range_max = max(df_feature_nan[feature_test])\r\n            range_min = min(df_feature_nan[feature_test])\r\n            #TODO update with confidence interval\r\n            df[feature_test] = df[feature_test].apply(lambda x:\\\r\n                'nan_stuff' if x >= range_min and x <= range_max else 'not_nan')\r\n            df_feature_nan = df[df[feature]==True]\r\n        '''\r\n        # Here all feature_test are of object type\r\n        # find the categories that contain most (98%) of the feature's nans\r\n        if df_feature_nan[feature_test].dtype == 'object':\r\n            df_feature_nan_ratio = df_feature_nan[feature_test].value_counts().sort_values(ascending=False)\\\r\n                /df_feature_nan.shape[0]\r\n            count = 0\r\n            value_list = []\r\n            for value_index in df_feature_nan_ratio.index:\r\n                if df_feature_nan_ratio.index.dtype != 'object':\r\n                    value_index = str(value_index)\r\n                count = count + df_feature_nan_ratio.loc[value_index]\r\n                value_list.append(value_index)\r\n                if count >= INEXISTENCE_NAN_CORR_RATIO:\r\n                    break\r\n            debug(feature+' inexistent with '+feature_test+' ratio count: '+str(count))\r\n            df_nan_len = df_feature_nan.shape[0]\r\n\r\n            index_list = [index for index in df.index if df[feature_test][index] in value_list]\r\n            df_in_value = df.loc[index_list]\r\n            df_in_value_len = df_in_value.shape[0]\r\n            test_values_ratio = df_nan_len / df_in_value_len\r\n\r\n            debug(feature+' inexistent with '+feature_test+' test ratio: ---------- '+str(test_values_ratio))\r\n            if test_values_ratio > INEXISTENCE_NAN_CORR_RATIO:\r\n                trace('inexistent '+feature+' -> '+feature_test+' test ratio: '+str(test_values_ratio))\r\n                return True\r\n    return False\r\n\r\nNAN_INT_RATIO = 0.50\r\nNAN_INT_EXT_RATIO = 1.5\r\ndef filt_nan_hi_corr_feature(dataframe, feature, target):\r\n    df_nan = impute_status(dataframe)\r\n    nan_status_list = [f_ for f_ in df_nan.columns if NAN_STATUS in f_ \\\r\n                       and target not in f_ and feature not in f_]\r\n    feature_nan = df_nan[feature+NAN_STATUS]\r\n    feature_data = 1 - feature_nan\r\n    nan_hi_corr = []\r\n    for f_ in nan_status_list:\r\n        internal_sub_nan = df_nan[f_] & feature_nan\r\n        internal_nan_ratio = sum(internal_sub_nan)/sum(feature_nan)\r\n        external_sub_nan = df_nan[f_] & feature_data\r\n        external_nan_ratio = sum(external_sub_nan)/sum(feature_data)\r\n        if internal_nan_ratio > NAN_INT_RATIO\\\r\n                or internal_nan_ratio/(external_nan_ratio+0.001) > NAN_INT_EXT_RATIO:\r\n            nan_hi_corr.append(f_)\r\n            debug('nan_hi_corr_feature '+feature+' & '+f_[:-6]+', nan_ratio: '+str(internal_nan_ratio))\r\n            debug('nan_hi_corr_feature: int/ext nan_ratio: '+str(internal_nan_ratio/(external_nan_ratio+0.001)))\r\n    feature_drop = [f_.replace(NAN_STATUS, '') for f_ in nan_hi_corr]\r\n    df_train = dataframe.drop(feature_drop, axis=1)\r\n    trace('nan_hi_corr_feature '+feature+' ', feature_drop)\r\n    return df_train\r\n\r\nAUTO_NAN_RATIO_THRESHOLD_BINNING = 0.50\r\nAUTO_TARGET_OBJECT_ACC = 0.65\r\nAUTO_TARGET_NUMBER_R2 = 0.5\r\ndef impute_auto(dataframe, feature, target, model='tree'):\r\n    '''\r\n    model options: tree, regression, xgboost (lgbm)\r\n    '''\r\n\r\n    # champion prediction: try each imputer and keep the most accurate one\r\n    predict = [\r\n        impute_knn,\r\n        impute_lgbm,\r\n#        impute_decisiontree,\r\n#        impute_bayes,\r\n        impute_random_forest,\r\n        impute_mean]\r\n    df = pd.DataFrame()\r\n    acc = 0\r\n    method_index = -1\r\n    for index in range(len(predict)):\r\n        test_df = dataframe[dataframe[feature].isnull()].drop(feature, axis=1)\r\n        df_temp, acc_temp = predict[index](dataframe.copy(deep=True), feature, target)\r\n        test_df2 = df_temp[df_temp[feature].isnull()].drop(feature, axis=1)\r\n        if test_df2.shape[0] != 0:\r\n            trace('impute_auto fail to impute with ' + predict[index].__name__)\r\n            trace('initial len: ' + str(test_df.shape[0])+' After len: '+str(test_df2.shape[0]))\r\n        if acc_temp > acc:\r\n            df = df_temp\r\n            acc = acc_temp\r\n            method_index = index\r\n\r\n    trace('impute_auto feature: '+feature+' method: '+predict[method_index].__name__+' Accuracy: '+str(acc))\r\n    temp = dataframe.copy(deep=True)\r\n    temp[feature] = df[feature]\r\n    df = temp\r\n\r\n    if df[feature].dtype == 'object' and acc > AUTO_TARGET_OBJECT_ACC:\r\n        pass  # classification accuracy is acceptable, keep the imputed values\r\n    elif acc > AUTO_TARGET_NUMBER_R2:\r\n        pass  # regression score is acceptable, keep the imputed values\r\n    else:\r\n        percent = dataframe[[feature]].isnull().sum()/dataframe[[feature]].shape[0]\r\n        if model == 'regression' and percent[feature] > AUTO_NAN_RATIO_THRESHOLD_BINNING:\r\n            df = binning_feature(dataframe, feature)\r\n            trace('impute_auto binning feature: '+feature)\r\n        elif model == 'tree':\r\n            trace('impute_auto hardcode feature: '+feature)\r\n            df = impute_hardcode(dataframe, feature)\r\n    return df, acc\r\n\r\n\r\ndef impute_mean(dataframe, feature, target='', intern=False):\r\n    df = dataframe\r\n    mean = 0\r\n    if df[feature].dtype == 'object':\r\n        mean = df[feature].fillna(FILL_NA).mode().values[0]\r\n    else:\r\n        sample_num = min(dataframe.shape[0], 1000)\r\n        variable_num = df[feature].sample(sample_num).nunique()\r\n        # use the mode for low-cardinality number features, otherwise the mean\r\n        if variable_num < 10 and sample_num/variable_num > 100:\r\n            mean = df[feature].mode().values[0]\r\n        else:\r\n            mean = df[feature].mean()\r\n            if df[feature].dtype == 'int':\r\n                mean = round(mean)\r\n\r\n    df_pfm = pd.DataFrame()\r\n    df_pfm['original'] = dataframe[feature]\r\n    df_pfm['filled'] = mean\r\n    df_pfm = df_pfm.loc[df_pfm['original'].notnull()]\r\n    acc = 0\r\n    if intern == False:\r\n        acc = fast_model.prediction_score(df_pfm, 'original', 'filled')\r\n\r\n    debug('impute_mean intern: ' + str(intern) + ' ' + feature + ' acc: ' + str(acc))\r\n    df[feature].fillna(mean, inplace=True)\r\n    return df, acc\r\n\r\n\r\ndef impute_knn(dataframe, feature, target):\r\n    return _impute_x_model(fast_model.int_module_knn, \\\r\n                           fast_model.int_module_knn_param, \\\r\n                           dataframe, feature, target)\r\n\r\ndef impute_lgbm(dataframe, feature, target):\r\n    return _impute_x_model(fast_model.int_module_lgbm_cv, \\\r\n                           fast_model.int_module_lgbm_param, \\\r\n                           dataframe, feature, target)\r\n\r\n\r\ndef impute_random_forest(dataframe, feature, target):\r\n    return _impute_x_model(fast_model.int_module_random_forest, \\\r\n                           fast_model.int_module_random_forest_param, \\\r\n                           dataframe, feature, target)\r\n\r\ndef _impute_x_model(model, param, dataframe, feature, target):\r\n    df = dataframe\r\n    is_classifier = fast_model._is_classifier(dataframe, feature)\r\n    parameter = param(is_classifier)\r\n\r\n    # train on rows where the feature is present, predict where it is missing\r\n    feature_list = [f_ for f_ in dataframe.columns if f_ != feature and f_ != target]\r\n    train = df[feature_list]\r\n    train, _ = one_hot_encoder(train, True)\r\n    for f_ in train.columns:\r\n        train, _ = impute_mean(train, f_, intern=True)\r\n\r\n    train = pd.concat([train, df[feature]], axis=1)\r\n    train_df = train[train[feature].notnull()]\r\n    test_df = train[train[feature].isnull()].drop(feature, axis=1)\r\n    test_target = train[train[feature].isnull()][[feature]]\r\n\r\n    test_target.loc[:, feature], acc = model(parameter, dataframe=train_df, \\\r\n                                             target=feature, test_dataframe=test_df)\r\n    if test_target.shape[0] > 0:\r\n        df.loc[test_target.index, feature] = test_target.loc[:, feature]\r\n\r\n    trace('impute model: '+model.__name__+' feature: ' + feature + ' acc: '+str(acc))\r\n    return df, acc\r\n\r\nNAN_DISTANCE = -2\r\ndef impute_hardcode(dataframe, feature):\r\n    df = dataframe\r\n    if df[feature].dtype == 'object':\r\n        df[feature].fillna(FILL_NA, inplace=True)\r\n    else:\r\n        # fill with a value far outside the observed range so models can isolate it\r\n        mean = dataframe[feature].dropna().mean()\r\n        factor = NAN_DISTANCE if mean >= 0 else NAN_DISTANCE*-1\r\n        fill_na = factor*max(abs(dataframe[feature]))\r\n        df[feature].fillna(fill_na, inplace=True)\r\n    return df\r\n","repo_name":"woodsgoing/impulate-missing-values","sub_path":"fast_impute.py","file_name":"fast_impute.py","file_ext":"py","file_size_in_byte":21186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"6146010091","text":"import os, sys, inspect\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\n\nimport numpy as np\nfrom mime import *\nfrom lime import lime_tabular\nimport pmlb\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom scipy.stats import pearsonr\n\n# Fetch the data\ndata = pmlb.fetch_data(\"churn\")\ncategorical = [0, 2, 3, 4, 5]\ndata = data.sample(frac=1.0)  # shuffle the rows\ndata_x, data_y = data.drop('target', axis=1), data['target']\n\n# 70/30 train/test split\ntrain_x = data_x.sample(frac=0.7)\ntrain_y = data_y.reindex(train_x.index)\ntest_x = data_x.drop(train_x.index)\ntest_y = data_y.reindex(test_x.index)\nassert len(test_x.index.intersection(train_x.index)) == 0  # sanity check: splits must not overlap\nparams = {\n    'criterion': ['gini', 'entropy'],\n    'n_estimators': [10*c for c in range(1, 5)]\n}\n# Training a random forest with a small grid search\nclassifier = RandomForestClassifier()\nsearch = GridSearchCV(classifier, param_grid=params, scoring=\"recall\")\nsearch.fit(train_x, train_y)\n# Cross-validation score and held-out test score\nprint(search.best_score_)\nprint(search.best_estimator_.score(test_x, test_y))\nclassifier = search.best_estimator_\nmime_explainer = Mime(data_x, data_y, categorical)\ninstance = data_x.sample(1)\nimportances, prediction = mime_explainer.explain(instance.values[0], classifier.predict)\nprint(importances, prediction)\nlime_explainer = lime_tabular.LimeTabularExplainer(data_x.values, feature_names=data_x.columns,\n                                                   class_names=['no-churn', 'churn'],\n                                                   categorical_features=categorical)\nexp = lime_explainer.explain_instance(instance.values[0], classifier.predict_proba, top_labels=1, num_features=100)\n\ndef rank_order(lizt):\n    # pair each feature index with its importance and sort by importance\n    order = [(ind, value) for ind, value in enumerate(lizt)]\n    order.sort(key=lambda x: x[1], reverse=True)\n    return order\n\nord_mime = rank_order(importances)\nprint(ord_mime)\nord_lime = exp.as_map()[0]\nprint(ord_lime)\n# plt.xticks(range(len(ord_mime)))\n# plt.yticks(range(len(ord_mime)))\n# plt.scatter([v[0] for v in ord_mime], [v[0] for v in exp.as_map()[0]])\n# plt.show()\n# pearsonr([v[0] for v in ord_mime], [v[0] for v in exp.as_map().popitem()[1]])\n\ndef calculate_rho_instance(instance):\n    # correlation of the MIME and LIME feature orderings for one instance\n    importances, prediction = mime_explainer.explain(instance[0], classifier.predict)\n    exp = lime_explainer.explain_instance(instance[0], classifier.predict_proba, top_labels=1, num_features=100)\n    ord_mime = rank_order(importances)\n    return pearsonr([v[0] for v in ord_mime], [v[0] for v in exp.as_map().popitem()[1]])\n\ndata_explanations = data_x.sample(500)\nrhos = []\nconfidences = []\n\nfor instance in data_explanations.values:\n    instance = np.array([instance])\n    rho, confidence = calculate_rho_instance(instance)  # pearsonr returns (correlation, p-value)\n    rhos.append(rho)\n    confidences.append(confidence)\n\n# average rank correlation between MIME and LIME explanations\nprint(sum(rhos)/len(rhos))","repo_name":"victorgcapone/estudo-dirigido","sub_path":"codigo/lime_comparison.py","file_name":"lime_comparison.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38674230012","text":"from urlparse import urlparse\n\ndef sanitize(obj):\n    # make Mongo-style fields JSON-serialisable\n    if 'lastUpdateDate' in obj:\n        obj['lastUpdateDate'] = obj['lastUpdateDate'].isoformat()\n    if 'creationDate' in obj:\n        obj['creationDate'] = obj['creationDate'].isoformat()\n    if '_id' in obj:\n        obj['_id'] = str(obj['_id'])\n\n    return obj\n\ndef url_to_objloc(url):\n    # extract (collection, id) from a URL path like /collection/id\n    o = urlparse(url)\n    p = o.path.split(\"/\")\n    if len(p) > 1:\n        c = p[-2]\n        obj_id = p[-1]\n        return (c, obj_id)\n    else:\n        return None\n","repo_name":"saibaba/ecommerce","sub_path":"ecommerce/eutil.py","file_name":"eutil.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}